summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Lee <64482439+algojohnlee@users.noreply.github.com>2023-08-04 12:17:07 -0400
committerGitHub <noreply@github.com>2023-08-04 12:17:07 -0400
commitf239f8016fafafbfd85abe2c0b28a2c7344c9126 (patch)
tree5e362776b4df6541fdfdfcfbc9b7cf6f0704d851
parentcbccc6de4142432651aad4fcaed14b54c4822dde (diff)
parenta0e31a77656a9ab2ea32140e5b4cb15d371a2973 (diff)
Merge pull request #5633 from Algo-devops-service/relstable3.17.0v3.17.0-stable
-rw-r--r--.circleci/config.yml133
-rw-r--r--.github/workflows/benchmarks.yml6
-rw-r--r--.github/workflows/build.yml11
-rw-r--r--.github/workflows/codegen_verification.yml2
-rw-r--r--.github/workflows/container.yml4
-rw-r--r--.github/workflows/pr-type-category.yml2
-rw-r--r--.github/workflows/reviewdog.yml25
-rw-r--r--.github/workflows/tools.yml44
-rw-r--r--.gitignore3
-rw-r--r--.golangci-warnings.yml3
-rw-r--r--.golangci.yml31
-rw-r--r--Dockerfile10
-rw-r--r--Makefile24
-rw-r--r--README.md2
-rw-r--r--agreement/actor.go1
-rw-r--r--agreement/agreementtest/simulate_test.go23
-rw-r--r--agreement/autopsy.go5
-rw-r--r--agreement/bundle_test.go6
-rw-r--r--agreement/common_test.go16
-rw-r--r--agreement/cryptoRequestContext.go1
-rw-r--r--agreement/demux.go6
-rw-r--r--agreement/doc.go35
-rw-r--r--agreement/events.go12
-rw-r--r--agreement/msgp_gen.go771
-rw-r--r--agreement/proposalManager.go54
-rw-r--r--agreement/proposalStore.go80
-rw-r--r--agreement/proposalTracker.go48
-rw-r--r--agreement/pseudonode.go12
-rw-r--r--agreement/router.go1
-rw-r--r--agreement/sort.go10
-rw-r--r--agreement/voteAggregator.go52
-rw-r--r--agreement/voteAuxiliary.go11
-rw-r--r--agreement/voteTrackerContract.go29
-rw-r--r--buildnumber.dat2
-rw-r--r--catchup/catchpointService.go33
-rw-r--r--catchup/catchpointService_test.go6
-rw-r--r--catchup/fetcher_test.go4
-rw-r--r--catchup/ledgerFetcher.go60
-rw-r--r--catchup/ledgerFetcher_test.go110
-rw-r--r--catchup/peerSelector.go20
-rw-r--r--catchup/peerSelector_test.go52
-rw-r--r--catchup/service.go77
-rw-r--r--catchup/service_test.go64
-rw-r--r--catchup/universalFetcher.go13
-rw-r--r--catchup/universalFetcher_test.go16
-rw-r--r--cmd/algocfg/profileCommand.go11
-rw-r--r--cmd/algod/main.go24
-rw-r--r--cmd/algofix/doc.go3
-rw-r--r--cmd/algoh/main.go5
-rw-r--r--cmd/algokey/keyreg.go6
-rw-r--r--cmd/catchpointdump/file.go15
-rw-r--r--cmd/catchpointdump/net.go4
-rw-r--r--cmd/catchupsrv/download.go8
-rw-r--r--cmd/goal/account.go38
-rw-r--r--cmd/goal/application.go152
-rw-r--r--cmd/goal/asset.go102
-rw-r--r--cmd/goal/clerk.go34
-rw-r--r--cmd/goal/interact.go12
-rw-r--r--cmd/goal/network.go6
-rw-r--r--cmd/goal/node.go12
-rw-r--r--cmd/goal/tealsign.go12
-rw-r--r--cmd/incorporate/incorporate.go2
-rw-r--r--cmd/loadgenerator/main.go8
-rw-r--r--cmd/netgoal/generate.go12
-rw-r--r--cmd/netgoal/network.go10
-rw-r--r--cmd/opdoc/opdoc.go2
-rw-r--r--cmd/partitiontest_linter/go.mod8
-rw-r--r--cmd/partitiontest_linter/go.sum13
-rw-r--r--cmd/pingpong/runCmd.go30
-rw-r--r--cmd/tealdbg/local.go19
-rwxr-xr-xcmd/updater/update.sh2
-rw-r--r--components/mocks/mockCatchpointCatchupAccessor.go7
-rw-r--r--config/config.go8
-rw-r--r--config/config_test.go99
-rw-r--r--config/consensus.go46
-rw-r--r--config/defaultsGenerator/defaultsGenerator.go15
-rw-r--r--config/dnsbootstrap.go165
-rw-r--r--config/dnsbootstrap_test.go223
-rw-r--r--config/localTemplate.go75
-rw-r--r--config/local_defaults.go10
-rw-r--r--config/migrate.go11
-rw-r--r--config/version.go2
-rw-r--r--crypto/curve25519.go1
-rw-r--r--crypto/digest.go1
-rw-r--r--crypto/hashes.go3
-rw-r--r--crypto/merklearray/layer.go1
-rw-r--r--crypto/merklearray/merkle.go3
-rw-r--r--crypto/merklearray/msgp_gen.go28
-rw-r--r--crypto/merklearray/partial.go1
-rw-r--r--crypto/merklearray/proof.go49
-rw-r--r--crypto/merklearray/proof_test.go9
-rw-r--r--crypto/merklesignature/merkleSignatureScheme_test.go2
-rw-r--r--crypto/merklesignature/msgp_gen.go48
-rw-r--r--crypto/merkletrie/cache.go11
-rw-r--r--crypto/merkletrie/committer.go6
-rw-r--r--crypto/merkletrie/committer_test.go5
-rw-r--r--crypto/merkletrie/node.go4
-rw-r--r--crypto/msgp_gen.go378
-rw-r--r--crypto/multisig.go57
-rw-r--r--crypto/multisig_test.go1
-rw-r--r--crypto/secp256k1/curve.go2
-rw-r--r--crypto/stateproof/committableSignatureSlot.go1
-rw-r--r--crypto/stateproof/msgp_gen.go75
-rw-r--r--crypto/stateproof/prover.go6
-rw-r--r--crypto/stateproof/prover_test.go4
-rw-r--r--crypto/stateproof/structs.go9
-rw-r--r--crypto/stateproof/weights.go1
-rw-r--r--crypto/stateproof/weights_test.go11
-rw-r--r--crypto/vrf.go5
-rw-r--r--daemon/algod/api/algod.oas2.json145
-rw-r--r--daemon/algod/api/algod.oas3.yml156
-rw-r--r--daemon/algod/api/client/restClient.go12
-rw-r--r--daemon/algod/api/server/lib/middlewares/auth.go6
-rw-r--r--daemon/algod/api/server/lib/middlewares/cors.go2
-rw-r--r--daemon/algod/api/server/v2/account.go18
-rw-r--r--daemon/algod/api/server/v2/generated/data/routes.go357
-rw-r--r--daemon/algod/api/server/v2/generated/experimental/routes.go348
-rw-r--r--daemon/algod/api/server/v2/generated/model/types.go75
-rw-r--r--daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go359
-rw-r--r--daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go504
-rw-r--r--daemon/algod/api/server/v2/generated/participating/private/routes.go363
-rw-r--r--daemon/algod/api/server/v2/generated/participating/public/routes.go381
-rw-r--r--daemon/algod/api/server/v2/handlers.go65
-rw-r--r--daemon/algod/api/server/v2/handlers_test.go6
-rw-r--r--daemon/algod/api/server/v2/test/handlers_test.go5
-rw-r--r--daemon/algod/api/server/v2/test/helpers.go7
-rw-r--r--daemon/algod/api/server/v2/utils.go150
-rw-r--r--daemon/algod/api/spec/v2/msgp_gen.go20
-rw-r--r--daemon/algod/server.go14
-rw-r--r--daemon/kmd/api/api.go47
-rw-r--r--daemon/kmd/session/auth.go7
-rw-r--r--data/account/msgp_gen.go19
-rw-r--r--data/account/participation.go2
-rw-r--r--data/account/participationRegistry.go31
-rw-r--r--data/account/registeryDbOps.go16
-rw-r--r--data/basics/msgp_gen.go323
-rw-r--r--data/basics/overflow.go99
-rw-r--r--data/basics/sort.go5
-rw-r--r--data/basics/stateProofParticipant.go1
-rw-r--r--data/basics/teal.go41
-rw-r--r--data/basics/units.go6
-rw-r--r--data/basics/units_test.go126
-rw-r--r--data/basics/userBalance.go18
-rw-r--r--data/bookkeeping/block.go15
-rw-r--r--data/bookkeeping/genesis.go33
-rw-r--r--data/bookkeeping/genesis_test.go8
-rw-r--r--data/bookkeeping/msgp_gen.go582
-rw-r--r--data/bookkeeping/msgp_gen_test.go60
-rw-r--r--data/committee/committee.go1
-rw-r--r--data/committee/credential.go4
-rw-r--r--data/committee/msgp_gen.go32
-rw-r--r--data/committee/sortition/sortition.cpp16
-rw-r--r--data/committee/sortition/sortition.go65
-rw-r--r--data/committee/sortition/sortition.h16
-rw-r--r--data/committee/sortition/sortition_test.go64
-rw-r--r--data/hashable/msgp_gen.go8
-rw-r--r--data/ledger_test.go45
-rw-r--r--data/pools/transactionPool_test.go2
-rw-r--r--data/stateproofmsg/msgp_gen.go7
-rw-r--r--data/transactions/application.go12
-rw-r--r--data/transactions/common_test.go59
-rw-r--r--data/transactions/error.go1
-rw-r--r--data/transactions/logic/assembler.go82
-rw-r--r--data/transactions/logic/assembler_test.go46
-rw-r--r--data/transactions/logic/backwardCompat_test.go7
-rw-r--r--data/transactions/logic/box.go62
-rw-r--r--data/transactions/logic/debugger.go14
-rw-r--r--data/transactions/logic/doc.go2
-rw-r--r--data/transactions/logic/eval.go1230
-rw-r--r--data/transactions/logic/evalAppTxn_test.go110
-rw-r--r--data/transactions/logic/evalCrypto_test.go13
-rw-r--r--data/transactions/logic/evalStateful_test.go39
-rw-r--r--data/transactions/logic/eval_test.go39
-rw-r--r--data/transactions/logic/frames.go48
-rw-r--r--data/transactions/logic/mocktracer/scenarios.go2
-rw-r--r--data/transactions/logic/opcodes.go183
-rw-r--r--data/transactions/logic/opcodes_test.go13
-rw-r--r--data/transactions/logic/pairing.go30
-rw-r--r--data/transactions/logic/resources_test.go44
-rw-r--r--data/transactions/logic/tracer.go158
-rw-r--r--data/transactions/msgp_gen.go801
-rw-r--r--data/transactions/sort.go2
-rw-r--r--data/transactions/stateproof.go3
-rw-r--r--data/transactions/teal.go30
-rw-r--r--data/transactions/transaction.go10
-rw-r--r--data/transactions/verify/txn.go83
-rw-r--r--data/transactions/verify/txnBatch.go9
-rw-r--r--data/transactions/verify/txnBatch_test.go2
-rw-r--r--data/transactions/verify/txn_test.go57
-rw-r--r--data/txHandler.go6
-rw-r--r--docker/Dockerfile4
-rw-r--r--docker/README.md26
-rw-r--r--docker/build/Dockerfile4
-rw-r--r--docker/build/Dockerfile-deploy4
-rw-r--r--docker/build/aptly.Dockerfile2
-rw-r--r--docker/build/cicd.alpine.Dockerfile5
-rw-r--r--docker/build/cicd.centos.Dockerfile3
-rw-r--r--docker/build/cicd.centos8.Dockerfile4
-rw-r--r--docker/build/cicd.ubuntu.Dockerfile4
-rw-r--r--docker/build/docker.ubuntu.Dockerfile4
-rw-r--r--docker/build/releases-page.Dockerfile2
-rwxr-xr-xdocker/files/run/run.sh102
-rw-r--r--docs/follower_node.md1
-rw-r--r--gen/generate.go8
-rw-r--r--go.mod24
-rw-r--r--go.sum743
-rw-r--r--installer/config.json.example10
-rw-r--r--installer/debian/algorand-devtools/conffiles1
-rw-r--r--internal/rapidgen/rapidgenerators.go99
-rw-r--r--internal/rapidgen/tld.go1508
-rw-r--r--ledger/acctdeltas.go46
-rw-r--r--ledger/acctdeltas_test.go161
-rw-r--r--ledger/acctonline.go58
-rw-r--r--ledger/acctonline_expired_test.go5
-rw-r--r--ledger/acctonline_test.go125
-rw-r--r--ledger/acctupdates.go137
-rw-r--r--ledger/acctupdates_test.go202
-rw-r--r--ledger/apply/application.go22
-rw-r--r--ledger/apply/application_test.go44
-rw-r--r--ledger/apply/asset.go12
-rw-r--r--ledger/apply/asset_test.go33
-rw-r--r--ledger/apply/mockBalances_test.go58
-rw-r--r--ledger/apptxn_test.go2
-rw-r--r--ledger/archival_test.go2
-rw-r--r--ledger/catchpointtracker.go199
-rw-r--r--ledger/catchpointtracker_test.go36
-rw-r--r--ledger/catchpointwriter.go29
-rw-r--r--ledger/catchpointwriter_test.go50
-rw-r--r--ledger/catchupaccessor.go147
-rw-r--r--ledger/encoded/msgp_gen.go33
-rw-r--r--ledger/eval/cow.go9
-rw-r--r--ledger/eval/cow_test.go2
-rw-r--r--ledger/eval/eval.go40
-rw-r--r--ledger/eval/eval_test.go4
-rw-r--r--ledger/eval/prefetcher/prefetcher.go20
-rw-r--r--ledger/eval/txntracer.go32
-rw-r--r--ledger/evalbench_test.go2
-rw-r--r--ledger/ledger.go62
-rw-r--r--ledger/ledger_test.go142
-rw-r--r--ledger/ledgercore/error.go2
-rw-r--r--ledger/ledgercore/msgp_gen.go32
-rw-r--r--ledger/ledgercore/statedelta.go47
-rw-r--r--ledger/ledgercore/totals.go4
-rw-r--r--ledger/ledgercore/votersForRound.go2
-rw-r--r--ledger/metrics.go7
-rw-r--r--ledger/msgp_gen.go48
-rw-r--r--ledger/onlineaccountscache_test.go2
-rw-r--r--ledger/simulation/simulation_eval_test.go1409
-rw-r--r--ledger/simulation/simulator.go29
-rw-r--r--ledger/simulation/simulator_test.go2
-rw-r--r--ledger/simulation/trace.go111
-rw-r--r--ledger/simulation/tracer.go219
-rw-r--r--ledger/spverificationtracker.go8
-rw-r--r--ledger/spverificationtracker_test.go2
-rw-r--r--ledger/store/merkle_committer.go75
-rw-r--r--ledger/store/testing/helpers.go2
-rw-r--r--ledger/store/trackerdb/catchpoint.go5
-rw-r--r--ledger/store/trackerdb/data_test.go25
-rw-r--r--ledger/store/trackerdb/interface.go12
-rw-r--r--ledger/store/trackerdb/msgp_gen.go81
-rw-r--r--ledger/store/trackerdb/sqlitedriver/accountsV2.go45
-rw-r--r--ledger/store/trackerdb/sqlitedriver/catchpoint.go4
-rw-r--r--ledger/store/trackerdb/sqlitedriver/catchpointPendingHashesIter.go10
-rw-r--r--ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go11
-rw-r--r--ledger/store/trackerdb/sqlitedriver/kvsIter.go10
-rw-r--r--ledger/store/trackerdb/sqlitedriver/merkle_commiter.go18
-rw-r--r--ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go35
-rw-r--r--ledger/store/trackerdb/sqlitedriver/schema.go178
-rw-r--r--ledger/store/trackerdb/sqlitedriver/spVerificationAccessor.go5
-rw-r--r--ledger/store/trackerdb/sqlitedriver/sql.go91
-rw-r--r--ledger/store/trackerdb/sqlitedriver/sql_test.go28
-rw-r--r--ledger/store/trackerdb/sqlitedriver/sqlitedriver.go343
-rw-r--r--ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go288
-rw-r--r--ledger/store/trackerdb/sqlitedriver/testing.go68
-rw-r--r--ledger/store/trackerdb/sqlitedriver/trackerdbV2.go127
-rw-r--r--ledger/store/trackerdb/store.go157
-rw-r--r--ledger/store/trackerdb/testinterface.go29
-rw-r--r--ledger/testing/randomAccounts.go2
-rw-r--r--ledger/tracker.go8
-rw-r--r--ledger/trackerdb.go81
-rw-r--r--ledger/txtail.go4
-rw-r--r--ledger/txtail_test.go2
-rw-r--r--logging/logspec/agreement.go1
-rw-r--r--logging/logspec/ledger.go1
-rw-r--r--logging/logspec/root.go1
-rw-r--r--logging/telemetryConfig.go5
-rw-r--r--logging/telemetryhook.go3
-rw-r--r--logging/telemetryspec/metric.go2
-rw-r--r--netdeploy/networkTemplate.go6
-rw-r--r--netdeploy/remote/bootstrappedNetwork.go3
-rw-r--r--netdeploy/remote/buildConfig.go1
-rw-r--r--netdeploy/remote/deployedNetwork.go60
-rw-r--r--netdeploy/remote/nodeConfig.go1
-rw-r--r--netdeploy/remote/nodecfg/nodeDir.go51
-rw-r--r--network/limited_reader_slurper.go18
-rw-r--r--network/limited_reader_slurper_test.go45
-rw-r--r--network/msgOfInterest.go17
-rw-r--r--network/msgp_gen.go72
-rw-r--r--network/phonebook.go4
-rw-r--r--network/topics.go2
-rw-r--r--network/wsNetwork.go133
-rw-r--r--network/wsNetwork_test.go757
-rw-r--r--network/wsPeer.go129
-rw-r--r--network/wsPeer_test.go3
-rw-r--r--node/error.go23
-rw-r--r--node/follower_node.go8
-rw-r--r--node/follower_node_test.go4
-rw-r--r--node/indexer/db.go279
-rw-r--r--node/indexer/indexer.go162
-rw-r--r--node/indexer/indexer_test.go287
-rw-r--r--node/msgp_gen.go99
-rw-r--r--node/netprio.go6
-rw-r--r--node/node.go60
-rw-r--r--node/node_test.go56
-rw-r--r--protocol/codec_tester.go8
-rw-r--r--protocol/consensus.go6
-rw-r--r--protocol/hash_test.go2
-rw-r--r--protocol/msgp_gen.go69
-rw-r--r--protocol/stateproof.go1
-rw-r--r--protocol/tags.go97
-rw-r--r--protocol/tags_test.go166
-rw-r--r--protocol/test/msgp_gen.go8
-rw-r--r--protocol/txntype.go3
-rw-r--r--rpcs/blockService.go82
-rw-r--r--rpcs/blockService_test.go212
-rw-r--r--rpcs/ledgerService.go43
-rw-r--r--rpcs/ledgerService_test.go174
-rw-r--r--rpcs/msgp_gen.go10
-rw-r--r--scripts/buildtools/versions10
-rwxr-xr-xscripts/check_deps.sh6
-rwxr-xr-xscripts/configure_dev.sh3
-rwxr-xr-xscripts/generate_beta_config.sh24
-rwxr-xr-xscripts/generate_devnet_config.sh25
-rwxr-xr-xscripts/generate_testnet_config.sh26
-rwxr-xr-xscripts/get_golang_version.sh6
-rwxr-xr-xscripts/install_linux_deps.sh6
-rw-r--r--scripts/release/README.md10
-rw-r--r--scripts/release/common/docker/centos.Dockerfile2
-rw-r--r--scripts/release/common/docker/centos8.Dockerfile2
-rw-r--r--scripts/release/common/docker/setup.Dockerfile2
-rwxr-xr-xscripts/release/common/setup.sh4
-rwxr-xr-xscripts/release/test/util/test_package.sh6
-rwxr-xr-xscripts/travis/codegen_verification.sh2
-rwxr-xr-xscripts/travis/configure_dev.sh8
-rw-r--r--shared/pingpong/accounts.go6
-rw-r--r--shared/pingpong/pingpong.go20
-rw-r--r--stateproof/README.md46
-rw-r--r--stateproof/builder.go16
-rw-r--r--stateproof/db.go2
-rw-r--r--stateproof/msgp_gen.go32
-rw-r--r--stateproof/stateproofMessageGenerator.go1
-rw-r--r--stateproof/worker.go2
-rw-r--r--test/e2e-go/features/catchup/catchpointCatchup_test.go188
-rw-r--r--test/e2e-go/features/catchup/stateproofsCatchup_test.go9
-rw-r--r--test/e2e-go/features/devmode/devmode_test.go39
-rw-r--r--test/e2e-go/features/followernode/syncDeltas_test.go (renamed from test/e2e-go/features/followerNode/syncDeltas_test.go)2
-rw-r--r--test/e2e-go/features/followernode/syncRestart_test.go (renamed from test/e2e-go/features/followerNode/syncRestart_test.go)2
-rw-r--r--test/e2e-go/features/participation/onlineOfflineParticipation_test.go6
-rw-r--r--test/e2e-go/features/participation/overlappingParticipationKeys_test.go3
-rw-r--r--test/e2e-go/restAPI/restClient_test.go1270
-rw-r--r--test/e2e-go/upgrades/stateproof_participation_test.go11
-rw-r--r--test/heapwatch/client_ram_report.py91
-rwxr-xr-xtest/heapwatch/plot_crr_csv.py71
-rw-r--r--test/muleCI/mule.yaml2
-rwxr-xr-xtest/packages/test_release.sh4
-rwxr-xr-xtest/platform/test_linux_amd64_compatibility.sh6
-rw-r--r--test/reflectionhelpers/helpers.go16
-rwxr-xr-xtest/scripts/e2e_client_runner.py18
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-simulate.sh47
-rwxr-xr-xtest/scripts/e2e_subs/goal-account-asset.sh56
-rwxr-xr-xtest/scripts/e2e_subs/rawsend.sh51
-rw-r--r--test/scripts/e2e_subs/tealprogs/stack-scratch.teal45
-rw-r--r--test/testdata/configs/config-v27.json4
-rw-r--r--test/testdata/configs/config-v28.json121
-rw-r--r--test/testdata/deployednettemplates/configs/reference.json1
-rw-r--r--test/testdata/deployednettemplates/recipes/baseline/reference.json1
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile4
-rw-r--r--test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json7
-rw-r--r--test/testdata/deployednettemplates/recipes/custom/configs/node.json2
-rw-r--r--test/testdata/deployednettemplates/recipes/custom/configs/relay.json3
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/net.json88
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario1s/relay.json3
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/genesis.json1
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/net.json324
-rw-r--r--test/testdata/deployednettemplates/recipes/scenario3s/relay.json3
-rw-r--r--test/testdata/nettemplates/DevModeTxnTracerNetwork.json36
-rw-r--r--tools/block-generator/Makefile51
-rw-r--r--tools/block-generator/README.md103
-rw-r--r--tools/block-generator/generator/config.go362
-rw-r--r--tools/block-generator/generator/config_test.go244
-rw-r--r--tools/block-generator/generator/generate.go848
-rw-r--r--tools/block-generator/generator/generate_apps.go259
-rw-r--r--tools/block-generator/generator/generate_test.go524
-rw-r--r--tools/block-generator/generator/generator_ledger.go342
-rw-r--r--tools/block-generator/generator/generator_types.go174
-rw-r--r--tools/block-generator/generator/make_transactions.go174
-rw-r--r--tools/block-generator/generator/server.go23
-rw-r--r--tools/block-generator/generator/server_test.go37
-rw-r--r--tools/block-generator/generator/teal/poap_boxes.teal551
-rw-r--r--tools/block-generator/generator/teal/poap_clear.teal5
-rw-r--r--tools/block-generator/generator/teal/swap_amm.teal527
-rw-r--r--tools/block-generator/generator/teal/swap_clear.teal5
-rw-r--r--tools/block-generator/generator/test_scenario.yml (renamed from tools/block-generator/test_config.yml)0
-rw-r--r--tools/block-generator/go.mod12
-rw-r--r--tools/block-generator/go.sum847
-rwxr-xr-xtools/block-generator/run_tests.sh97
-rw-r--r--tools/block-generator/runner/run.go40
-rw-r--r--tools/block-generator/runner/runner.go4
-rw-r--r--tools/block-generator/scenarios/benchmarks/organic.25000.yml29
-rw-r--r--tools/block-generator/scenarios/benchmarks/organic.50000.yml29
-rw-r--r--tools/block-generator/scenarios/benchmarks/payment.25000.yml11
-rw-r--r--tools/block-generator/scenarios/benchmarks/payment.50000.yml11
-rw-r--r--tools/block-generator/scenarios/benchmarks/stress.25000.yml28
-rw-r--r--tools/block-generator/scenarios/benchmarks/stress.50000.yml28
-rw-r--r--tools/block-generator/scenarios/config.allmixed.jumbo.yml31
-rw-r--r--tools/block-generator/scenarios/config.allmixed.small.yml31
-rw-r--r--tools/block-generator/scenarios/config.appboxes.small.yml19
-rw-r--r--tools/block-generator/scenarios/config.appcreate.small.yml19
-rw-r--r--tools/block-generator/scripts/print_tps.py72
-rwxr-xr-xtools/block-generator/scripts/run_postgres.sh (renamed from tools/block-generator/run_postgres.sh)2
-rw-r--r--tools/block-generator/scripts/run_runner.py201
-rwxr-xr-xtools/block-generator/scripts/run_runner.sh (renamed from tools/block-generator/run_runner.sh)14
-rw-r--r--tools/block-generator/upload_metrics.py53
-rw-r--r--tools/block-generator/util/util.go3
-rw-r--r--tools/debug/carpenter/main.go1
-rw-r--r--tools/debug/dumpblocks/main.go2
-rw-r--r--tools/debug/transplanter/main.go477
-rw-r--r--tools/network/bootstrap.go38
-rw-r--r--tools/network/bootstrap_test.go66
-rw-r--r--tools/network/dnssec/config_windows.go34
-rw-r--r--tools/network/dnssec/dialer.go17
-rw-r--r--tools/network/dnssec/resolver.go1
-rw-r--r--tools/network/dnssec/sort_test.go16
-rw-r--r--tools/network/telemetryURIUpdateService.go21
-rw-r--r--tools/teal/tealcut/main.go26
-rw-r--r--tools/x-repo-types/go.mod2
-rw-r--r--tools/x-repo-types/typeAnalyzer/typeAnalyzer.go3
-rw-r--r--tools/x-repo-types/xrt_test.go18
-rw-r--r--util/bloom/bloom.go2
-rw-r--r--util/execpool/pool.go2
-rw-r--r--util/execpool/stream.go8
-rw-r--r--util/metrics/counter.go13
-rw-r--r--util/metrics/counter_test.go20
-rw-r--r--util/metrics/metrics.go2
-rw-r--r--util/metrics/tagcounter.go5
-rw-r--r--util/process_windows.go5
-rw-r--r--util/sleep_linux_32.go2
-rw-r--r--util/sleep_linux_64.go2
-rw-r--r--util/tcpinfo_linux.go1
449 files changed, 23116 insertions, 9152 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 34e111cad..d2898da25 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -5,14 +5,14 @@
version: 2.1
orbs:
- win: circleci/windows@2.3.0
- go: circleci/go@1.7.0
- slack: circleci/slack@4.10.1
+ win: circleci/windows@5.0.0
+ go: circleci/go@1.7.3
+ slack: circleci/slack@4.12.5
parameters:
ubuntu_image:
type: string
- default: "ubuntu-2004:202104-01"
+ default: "ubuntu-2004:2023.04.2"
build_dir:
type: string
default: "/opt/cibuild"
@@ -47,19 +47,20 @@ executors:
resource_class: arm.large
mac_amd64_medium:
macos:
- xcode: 13.2.1
- resource_class: medium
+ xcode: 14.2.0
+ resource_class: macos.x86.medium.gen2
environment:
HOMEBREW_NO_AUTO_UPDATE: "true"
mac_amd64_large:
macos:
- xcode: 13.2.1
- resource_class: large
+ xcode: 14.2.0
+ # Since they removed the large class for amd64, we will use medium here too.
+ resource_class: macos.x86.medium.gen2
environment:
HOMEBREW_NO_AUTO_UPDATE: "true"
mac_arm64: &executor-mac-arm64
machine: true
- resource_class: algorand/macstadium-m1-macos11
+ resource_class: algorand/macstadium-m1
environment:
HOMEBREW_NO_AUTO_UPDATE: "true"
# these are required b/c jobs explicitly assign sizes to the executors
@@ -79,7 +80,7 @@ slack-fail-stop-step: &slack-fail-post-step
# ===== Workflow Definitions =====
workflows:
version: 2
- "circleci_build_and_test":
+ nightly_build_and_test:
jobs:
- build_nightly:
name: << matrix.platform >>_build_nightly
@@ -94,19 +95,6 @@ workflows:
context: slack-secrets
<<: *slack-fail-post-step
- - test:
- name: << matrix.platform >>_test
- matrix: &matrix-default
- parameters:
- platform: ["amd64", "arm64"]
- filters: &filters-default
- branches:
- ignore:
- - /rel\/.*/
- - << pipeline.parameters.valid_nightly_branch >>
- context: slack-secrets
- <<: *slack-fail-post-step
-
- test_nightly:
name: << matrix.platform >>_test_nightly
matrix:
@@ -116,15 +104,6 @@ workflows:
context: slack-secrets
<<: *slack-fail-post-step
- - integration:
- name: << matrix.platform >>_integration
- matrix:
- <<: *matrix-default
- filters:
- <<: *filters-default
- context: slack-secrets
- <<: *slack-fail-post-step
-
- integration_nightly:
name: << matrix.platform >>_integration_nightly
matrix:
@@ -134,15 +113,6 @@ workflows:
context: slack-secrets
<<: *slack-fail-post-step
- - e2e_expect:
- name: << matrix.platform >>_e2e_expect
- matrix:
- <<: *matrix-default
- filters:
- <<: *filters-default
- context: slack-secrets
- <<: *slack-fail-post-step
-
- e2e_expect_nightly:
name: << matrix.platform >>_e2e_expect_nightly
matrix:
@@ -152,15 +122,6 @@ workflows:
context: slack-secrets
<<: *slack-fail-post-step
- - e2e_subs:
- name: << matrix.platform >>_e2e_subs
- matrix:
- <<: *matrix-default
- filters:
- <<: *filters-default
- context: slack-secrets
- <<: *slack-fail-post-step
-
- e2e_subs_nightly:
name: << matrix.platform >>_e2e_subs_nightly
matrix:
@@ -172,17 +133,6 @@ workflows:
- aws-secrets
<<: *slack-fail-post-step
- - tests_verification_job:
- name: << matrix.platform >>_<< matrix.job_type >>_verification
- matrix:
- parameters:
- platform: ["amd64", "arm64"]
- job_type: ["test", "integration", "e2e_expect"]
- requires:
- - << matrix.platform >>_<< matrix.job_type >>
- context: slack-secrets
- <<: *slack-fail-post-step
-
- tests_verification_job_nightly:
name: << matrix.platform >>_<< matrix.job_type >>_verification
matrix:
@@ -203,17 +153,63 @@ workflows:
- << matrix.platform >>_integration_nightly_verification
- << matrix.platform >>_e2e_expect_nightly_verification
- << matrix.platform >>_e2e_subs_nightly
- filters:
- branches:
- only:
- - /rel\/.*/
- - << pipeline.parameters.valid_nightly_branch >>
context:
- slack-secrets
- aws-secrets
<<: *slack-fail-post-step
- #- windows_x64_build
+ "circleci_build_and_test":
+ jobs:
+ - test:
+ name: << matrix.platform >>_test
+ matrix: &matrix-default
+ parameters:
+ platform: ["amd64"]
+ filters: &filters-default
+ branches:
+ ignore:
+ - /rel\/.*/
+ - << pipeline.parameters.valid_nightly_branch >>
+ context: slack-secrets
+ <<: *slack-fail-post-step
+
+ - integration:
+ name: << matrix.platform >>_integration
+ matrix:
+ <<: *matrix-default
+ filters:
+ <<: *filters-default
+ context: slack-secrets
+ <<: *slack-fail-post-step
+
+ - e2e_expect:
+ name: << matrix.platform >>_e2e_expect
+ matrix:
+ <<: *matrix-default
+ filters:
+ <<: *filters-default
+ context: slack-secrets
+ <<: *slack-fail-post-step
+
+ - e2e_subs:
+ name: << matrix.platform >>_e2e_subs
+ matrix:
+ <<: *matrix-default
+ filters:
+ <<: *filters-default
+ context: slack-secrets
+ <<: *slack-fail-post-step
+
+ - tests_verification_job:
+ name: << matrix.platform >>_<< matrix.job_type >>_verification
+ matrix:
+ parameters:
+ platform: ["amd64"]
+ job_type: ["test", "integration", "e2e_expect"]
+ requires:
+ - << matrix.platform >>_<< matrix.job_type >>
+ context: slack-secrets
+ <<: *slack-fail-post-step
# ===== Job Definitions =====
jobs:
@@ -461,7 +457,7 @@ commands:
shell: bash.exe
command: |
choco install -y msys2 pacman make wget --force
- choco install -y golang --version=1.17.13 --force
+ choco install -y golang --version=1.20.5 --force
choco install -y python3 --version=3.7.3 --force
export msys2='cmd //C RefreshEnv.cmd '
export msys2+='& set MSYS=winsymlinks:nativestrict '
@@ -606,6 +602,7 @@ commands:
export PACKAGE_NAMES=$(echo $PACKAGES | tr -d '\n')
export PARTITION_TOTAL=${CIRCLE_NODE_TOTAL}
export PARTITION_ID=${CIRCLE_NODE_INDEX}
+ export GOEXPERIMENT="none"
gotestsum --format standard-verbose --junitfile << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/results.xml --jsonfile << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/testresults.json -- --tags "sqlite_unlock_notify sqlite_omit_load_extension" << parameters.short_test_flag >> -race -timeout 1h -coverprofile=coverage.txt -covermode=atomic -p 1 $PACKAGE_NAMES
- store_artifacts:
path: << parameters.result_path >>
diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml
index 51bacd0c8..5e11c6cac 100644
--- a/.github/workflows/benchmarks.yml
+++ b/.github/workflows/benchmarks.yml
@@ -17,8 +17,8 @@ jobs:
name: Performance regression check
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-go@v3
+ - uses: actions/checkout@v3.5.3
+ - uses: actions/setup-go@v4.0.1
with:
go-version-file: 'go.mod'
- run: go version
@@ -29,7 +29,7 @@ jobs:
# degradation, (2) `eval` degradation. (2) suggests a broader performance
# issue.
- name: Run benchmark
- run: go test ./data/transactions/logic -bench 'BenchmarkUintMath' | tee benchmark_output.txt
+ run: go test ./data/transactions/logic -bench 'BenchmarkUintMath' -run=^$ | tee benchmark_output.txt
- name: Push benchmark result to gh-pages branch
if: github.event_name == 'push'
uses: benchmark-action/github-action-benchmark@v1
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 56ab06793..785304b0d 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -17,16 +17,19 @@ jobs:
update: true
path-type: inherit
- name: Check out code
- uses: actions/checkout@v3
+ uses: actions/checkout@v3.5.3
with:
fetch-depth: 0
+ - name: Determine Go version
+ id: go_version
+ run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV
- name: Install golang
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4.0.1
with:
- go-version: "1.17.13"
+ go-version: ${{ env.GO_VERSION }}
- name: Restore libsodium from cache
id: cache-libsodium
- uses: actions/cache@v3
+ uses: actions/cache@v3.3.1
with:
path: crypto/libs
key: libsodium-fork-v2-${{ runner.os }}-${{ hashFiles('crypto/libsodium-fork/**') }}
diff --git a/.github/workflows/codegen_verification.yml b/.github/workflows/codegen_verification.yml
index 8b6bcf046..362f0c7ad 100644
--- a/.github/workflows/codegen_verification.yml
+++ b/.github/workflows/codegen_verification.yml
@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Check out code
- uses: actions/checkout@v3
+ uses: actions/checkout@v3.5.3
with:
fetch-depth: 0
path: go-algorand
diff --git a/.github/workflows/container.yml b/.github/workflows/container.yml
index d279f141c..75134a6d6 100644
--- a/.github/workflows/container.yml
+++ b/.github/workflows/container.yml
@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
- uses: actions/checkout@v3
+ uses: actions/checkout@v3.5.3
- name: Generate Container Metadata
id: meta
@@ -59,7 +59,7 @@ jobs:
# if: github.ref == format('refs/heads/{0}', 'master')
# steps:
# - name: Checkout Code
- # uses: actions/checkout@v3
+ # uses: actions/checkout@v3.5.3
# - name: Update DockerHub Repository Description
# uses: peter-evans/dockerhub-description@v3
diff --git a/.github/workflows/pr-type-category.yml b/.github/workflows/pr-type-category.yml
index 478b0b90e..b496e862a 100644
--- a/.github/workflows/pr-type-category.yml
+++ b/.github/workflows/pr-type-category.yml
@@ -10,7 +10,7 @@ jobs:
name: Check PR Category and Type
steps:
- name: Checking for correct number of required github pr labels
- uses: mheap/github-action-required-labels@v2
+ uses: mheap/github-action-required-labels@v5
with:
mode: exactly
count: 1
diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
index fcba97537..46c547073 100644
--- a/.github/workflows/reviewdog.yml
+++ b/.github/workflows/reviewdog.yml
@@ -10,19 +10,17 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v3.5.3
with:
fetch-depth: 0 # required for new-from-rev option in .golangci.yml
- - name: Install libraries
- run: sudo apt-get -y -q install libboost-math-dev
# move go out of the way temporarily to avoid "go list ./..." from installing modules
- name: Make libsodium.a
run: sudo mv /usr/bin/go /usr/bin/go.bak && make crypto/libs/linux/amd64/lib/libsodium.a && sudo mv /usr/bin/go.bak /usr/bin/go
- name: reviewdog-golangci-lint
- uses: reviewdog/action-golangci-lint@v2
+ uses: reviewdog/action-golangci-lint@v2.3.1
with:
go_version_file: go.mod
- golangci_lint_version: "v1.47.3"
+ golangci_lint_version: "v1.53.2"
golangci_lint_flags: "-c .golangci.yml --allow-parallel-runners"
reporter: "github-pr-check"
tool_name: "Lint Errors"
@@ -34,11 +32,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v3.5.3
with:
fetch-depth: 0 # required for new-from-rev option in .golangci.yml
- - name: Install libraries
- run: sudo apt-get -y -q install libboost-math-dev
# move go out of the way temporarily to avoid "go list ./..." from installing modules
- name: Make libsodium.a
run: sudo mv /usr/bin/go /usr/bin/go.bak && make crypto/libs/linux/amd64/lib/libsodium.a && sudo mv /usr/bin/go.bak /usr/bin/go
@@ -46,25 +42,28 @@ jobs:
run: |
echo "$GITHUB_WORKSPACE/bin" >> $GITHUB_PATH
echo "$RUNNER_WORKSPACE/$(basename $GITHUB_REPOSITORY)/bin" >> $GITHUB_PATH
+ - name: Determine Go version
+ id: go_version
+ run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV
- name: Install specific golang
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4.0.1
with:
- go-version: '1.17.13'
+ go-version: ${{ env.GO_VERSION }}
- name: Create folders for golangci-lint
run: mkdir -p cicdtmp/golangci-lint
- name: Check if custom golangci-lint is already built
id: cache-golangci-lint
- uses: actions/cache@v3
+ uses: actions/cache@v3.3.1
with:
path: cicdtmp/golangci-lint/golangci-lint-cgo
- key: cicd-golangci-lint-cgo-v0.0.1
+ key: cicd-golangci-lint-cgo-v0.0.2
- name: Build custom golangci-lint with CGO_ENABLED
if: steps.cache-golangci-lint.outputs.cache-hit != 'true'
run: |
cd cicdtmp/golangci-lint
git clone https://github.com/golangci/golangci-lint.git .
- git checkout tags/v1.47.3
+ git checkout tags/v1.53.2
CGO_ENABLED=true go build -trimpath -o golangci-lint-cgo ./cmd/golangci-lint
./golangci-lint-cgo --version
cd ../../
diff --git a/.github/workflows/tools.yml b/.github/workflows/tools.yml
new file mode 100644
index 000000000..90b90cec8
--- /dev/null
+++ b/.github/workflows/tools.yml
@@ -0,0 +1,44 @@
+# Test tools directories that have their own go.mod files, and
+# otherwise wouldn't get tested along with the main go-algorand build.
+name: Test tools modules
+on:
+ push:
+ branches:
+ - master
+ paths:
+ - 'tools/block-generator/**'
+ - 'tools/x-repo-types/**'
+ pull_request:
+ paths:
+ - 'tools/block-generator/**'
+ - 'tools/x-repo-types/**'
+
+jobs:
+ tools_test:
+ name: Test tools modules
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v3.5.3
+ # move go out of the way temporarily to avoid "go list ./..." from installing modules
+ - name: Make libsodium.a
+ run: sudo mv /usr/bin/go /usr/bin/go.bak && make crypto/libs/linux/amd64/lib/libsodium.a && sudo mv /usr/bin/go.bak /usr/bin/go
+ - name: Add bin to PATH
+ run: |
+ echo "$GITHUB_WORKSPACE/bin" >> $GITHUB_PATH
+ echo "$RUNNER_WORKSPACE/$(basename $GITHUB_REPOSITORY)/bin" >> $GITHUB_PATH
+ - name: Determine Go version
+ id: go_version
+ run: echo "GO_VERSION=$(./scripts/get_golang_version.sh)" >> $GITHUB_ENV
+ - name: Install go version
+ uses: actions/setup-go@v4.0.1
+ with:
+ go-version: ${{ env.GO_VERSION }}
+ - name: Test tools/block-generator
+ run: |
+ cd tools/block-generator
+ go test -v ./...
+ - name: Test tools/x-repo-types
+ run: |
+ cd tools/x-repo-types
+ go test -v ./...
diff --git a/.gitignore b/.gitignore
index 7c774b834..8deb3a2d9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -74,3 +74,6 @@ tools/block-generator/block-generator
# cross repo types tool binary
tools/x-repo-types/x-repo-types
+
+# python virtual environment
+.venv
diff --git a/.golangci-warnings.yml b/.golangci-warnings.yml
index 440a07f7d..6e58b187c 100644
--- a/.golangci-warnings.yml
+++ b/.golangci-warnings.yml
@@ -5,11 +5,8 @@ run:
linters:
disable-all: true
enable:
- - deadcode
- gosec
- partitiontest
- - structcheck
- - varcheck
- unused
linters-settings:
diff --git a/.golangci.yml b/.golangci.yml
index 40299509b..24dba2d0c 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -33,8 +33,15 @@ linters-settings:
exclude-functions:
# We do this 121 times and never check the error.
- (*github.com/spf13/cobra.Command).MarkFlagRequired
+ - (*github.com/spf13/pflag.FlagSet).MarkDeprecated
+ - (*github.com/spf13/pflag.FlagSet).MarkShorthandDeprecated
govet:
+ check-shadowing: true
settings:
+ shadow:
+ # explanation of strict vs non-strict:
+ # https://github.com/golang/tools/blob/v0.7.0/go/analysis/passes/shadow/shadow.go#L104-L122
+ strict: false
printf:
# Comma-separated list of print function names to check (in addition to default, see `go tool vet help printf`).
# Default: []
@@ -95,7 +102,15 @@ issues:
# "EXC0005 staticcheck: Developers tend to write in C-style with an explicit 'break' in a 'switch', so it's ok to ignore"
- ineffective break statement. Did you mean to break out of the outer loop
# revive: irrelevant error about naming
- - "var-naming: don't use leading k in Go names"
+ - "^var-naming: don't use leading k in Go names"
+ # revive: ignore unused-paramter, package-comments, unexported-return, redefines-builtin-id, var-declaration, empty-block, superfluous-else
+ - "^unused-parameter: parameter"
+ - "^package-comments: should have a package comment"
+ - "^unexported-return: "
+ - "^redefines-builtin-id: redefinition of"
+ - "^var-declaration: should"
+ - "^empty-block: this block is empty, you can remove it"
+ - "^superfluous-else: if block ends with"
exclude-rules:
- path: _test\.go
@@ -110,8 +125,20 @@ issues:
# - nilerr
- nolintlint
# - revive
- - staticcheck
+ # - staticcheck
- typecheck
+ - path: _test\.go
+ linters:
+ - staticcheck
+ text: "SA4006: this value" # of X is never used
+ - linters:
+ - staticcheck
+ text: "SA1019: rand*"
+ # allow shadowing in test code
+ - path: _test\.go
+ linters:
+ - govet
+ text: "shadows declaration at line"
# Ignore missing parallel tests in existing packages
- path: ^agreement.*_test\.go
linters:
diff --git a/Dockerfile b/Dockerfile
index 859e602d4..0c6c370c9 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
-FROM ubuntu:18.04 as builder
+FROM ubuntu:20.04 as builder
-ARG GO_VERSION="1.17.13"
+ARG GO_VERSION="1.20.5"
ARG CHANNEL
ARG URL
@@ -41,12 +41,14 @@ RUN /dist/files/build/install.sh \
-b "${BRANCH}" \
-s "${SHA}"
-FROM debian:bullseye-slim as final
+FROM debian:bookworm-20230703-slim as final
ENV PATH="/node/bin:${PATH}" ALGOD_PORT="8080" KMD_PORT="7833" ALGORAND_DATA="/algod/data"
# curl is needed to lookup the fast catchup url
-RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl && \
+RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl gosu && \
+ update-ca-certificates && \
+ apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
mkdir -p "$ALGORAND_DATA" && \
groupadd --gid=999 --system algorand && \
diff --git a/Makefile b/Makefile
index 450ab6999..0c4974402 100644
--- a/Makefile
+++ b/Makefile
@@ -49,9 +49,9 @@ export CPATH=/opt/homebrew/include
export LIBRARY_PATH=/opt/homebrew/lib
endif
endif
+
ifeq ($(UNAME), Linux)
EXTLDFLAGS := -static-libstdc++ -static-libgcc
-ifeq ($(ARCH), amd64)
# the following predicate is abit misleading; it tests if we're not in centos.
ifeq (,$(wildcard /etc/centos-release))
EXTLDFLAGS += -static
@@ -59,14 +59,6 @@ endif
GOTAGSLIST += osusergo netgo static_build
GOBUILDMODE := -buildmode pie
endif
-ifeq ($(ARCH), arm)
-ifneq ("$(wildcard /etc/alpine-release)","")
-EXTLDFLAGS += -static
-GOTAGSLIST += osusergo netgo static_build
-GOBUILDMODE := -buildmode pie
-endif
-endif
-endif
ifneq (, $(findstring MINGW,$(UNAME)))
EXTLDFLAGS := -static -static-libstdc++ -static-libgcc
@@ -77,6 +69,11 @@ ifeq ($(SHORT_PART_PERIOD), 1)
export SHORT_PART_PERIOD_FLAG := -s
endif
+# Disable go experiments during build as of go 1.20.5 due to
+# https://github.com/golang/go/issues/60825
+# Likely fix: https://go-review.googlesource.com/c/go/+/503937/6/src/runtime/race_arm64.s
+export GOEXPERIMENT=none
+
GOTAGS := --tags "$(GOTAGSLIST)"
GOTRIMPATH := $(shell GOPATH=$(GOPATH) && go help build | grep -q .-trimpath && echo -trimpath)
@@ -92,6 +89,8 @@ GOLDFLAGS := $(GOLDFLAGS_BASE) \
UNIT_TEST_SOURCES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && go list ./... | grep -v /go-algorand/test/ ))
ALGOD_API_PACKAGES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && cd daemon/algod/api; go list ./... ))
+GOMOD_DIRS := ./tools/block-generator ./tools/x-repo-types
+
MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/merklearray ./crypto/merklesignature ./crypto/stateproof ./data/basics ./data/transactions ./data/stateproofmsg ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./network ./node ./ledger ./ledger/ledgercore ./ledger/store/trackerdb ./ledger/encoded ./stateproof ./data/account ./daemon/algod/api/spec/v2
default: build
@@ -115,7 +114,12 @@ check_go_version:
fi
tidy: check_go_version
+ @echo "Tidying go-algorand"
go mod tidy -compat=$(GOLANG_VERSION_SUPPORT)
+ @for dir in $(GOMOD_DIRS); do \
+ echo "Tidying $$dir" && \
+ (cd $$dir && go mod tidy -compat=$(GOLANG_VERSION_SUPPORT)); \
+ done
check_shell:
find . -type f -name "*.sh" -exec shellcheck {} +
@@ -151,7 +155,7 @@ ALWAYS:
# build our fork of libsodium, placing artifacts into crypto/lib/ and crypto/include/
crypto/libs/$(OS_TYPE)/$(ARCH)/lib/libsodium.a:
mkdir -p crypto/copies/$(OS_TYPE)/$(ARCH)
- cp -R crypto/libsodium-fork crypto/copies/$(OS_TYPE)/$(ARCH)/libsodium-fork
+ cp -R crypto/libsodium-fork/. crypto/copies/$(OS_TYPE)/$(ARCH)/libsodium-fork
cd crypto/copies/$(OS_TYPE)/$(ARCH)/libsodium-fork && \
./autogen.sh --prefix $(SRCPATH)/crypto/libs/$(OS_TYPE)/$(ARCH) && \
./configure --disable-shared --prefix="$(SRCPATH)/crypto/libs/$(OS_TYPE)/$(ARCH)" && \
diff --git a/README.md b/README.md
index b8e3f8207..682d88436 100644
--- a/README.md
+++ b/README.md
@@ -22,7 +22,7 @@ the [official Go documentation website](https://golang.org/doc/).
### Linux / OSX
-We currently strive to support Debian-based distributions with Ubuntu 18.04
+We currently strive to support Debian-based distributions with Ubuntu 20.04
being our official release target.
Building on Arch Linux works as well.
Our core engineering team uses Linux and OSX, so both environments are well
diff --git a/agreement/actor.go b/agreement/actor.go
index e02c5eb4c..fa5e0892a 100644
--- a/agreement/actor.go
+++ b/agreement/actor.go
@@ -62,7 +62,6 @@ type actorContract interface {
}
// A checkedActor wraps an actor, checking its contract on each call.
-//
type checkedActor struct {
actor
actorContract
diff --git a/agreement/agreementtest/simulate_test.go b/agreement/agreementtest/simulate_test.go
index 051659207..53c42411e 100644
--- a/agreement/agreementtest/simulate_test.go
+++ b/agreement/agreementtest/simulate_test.go
@@ -27,6 +27,7 @@ import (
"github.com/algorand/go-deadlock"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/maps"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
@@ -124,24 +125,10 @@ func makeTestLedger(state map[basics.Address]basics.AccountData) agreement.Ledge
func (l *testLedger) copy() *testLedger {
dup := new(testLedger)
- dup.entries = make(map[basics.Round]bookkeeping.Block)
- dup.certs = make(map[basics.Round]agreement.Certificate)
- dup.state = make(map[basics.Address]basics.AccountData)
- dup.notifications = make(map[basics.Round]signal)
-
- for k, v := range l.entries {
- dup.entries[k] = v
- }
- for k, v := range l.certs {
- dup.certs[k] = v
- }
- for k, v := range l.state {
- dup.state[k] = v
- }
- for k, v := range dup.notifications {
- // note that old opened channels will now fire when these are closed
- dup.notifications[k] = v
- }
+ dup.entries = maps.Clone(l.entries)
+ dup.certs = maps.Clone(l.certs)
+ dup.state = maps.Clone(l.state)
+ dup.notifications = maps.Clone(l.notifications) // old opened channels will now fire when these are closed
dup.nextRound = l.nextRound
return dup
diff --git a/agreement/autopsy.go b/agreement/autopsy.go
index e940ae4bc..30f41e13c 100644
--- a/agreement/autopsy.go
+++ b/agreement/autopsy.go
@@ -24,6 +24,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
+ "golang.org/x/exp/slices"
)
// An Autopsy is a trace of the ordered input events and output
@@ -102,9 +103,7 @@ func (m *multiCloser) Close() error {
// makeMultiCloser returns a Closer that closes all the given closers.
func makeMultiCloser(closers ...io.Closer) io.Closer {
- r := make([]io.Closer, len(closers))
- copy(r, closers)
- return &multiCloser{r}
+ return &multiCloser{slices.Clone(closers)}
}
type autopsyTrace struct {
diff --git a/agreement/bundle_test.go b/agreement/bundle_test.go
index 4542b0cd7..7bbb42f2e 100644
--- a/agreement/bundle_test.go
+++ b/agreement/bundle_test.go
@@ -108,7 +108,7 @@ func makeBundlePanicWrapper(t *testing.T, message string, proposal proposalValue
return uab
}
-//Test Bundle Creation with Validation for duplicate votes from same sender
+// Test Bundle Creation with Validation for duplicate votes from same sender
func TestBundleCreationWithVotesFromSameAddress(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -173,7 +173,7 @@ func TestBundleCreationWithVotesFromSameAddress(t *testing.T) {
}
-//Test Bundle Creation with Validation
+// Test Bundle Creation with Validation
func TestBundleCreationWithEquivocationVotes(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -283,7 +283,7 @@ func TestBundleCreationWithEquivocationVotes(t *testing.T) {
}
-//Test Bundle Creation with Validation
+// Test Bundle Creation with Validation
func TestBundleCertificationWithEquivocationVotes(t *testing.T) {
partitiontest.PartitionTest(t)
diff --git a/agreement/common_test.go b/agreement/common_test.go
index 8f2ad8c4f..0c11d9553 100644
--- a/agreement/common_test.go
+++ b/agreement/common_test.go
@@ -24,6 +24,7 @@ import (
"github.com/algorand/go-deadlock"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/maps"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -207,10 +208,7 @@ func makeTestLedger(state map[basics.Address]basics.AccountData) Ledger {
l.certs = make(map[basics.Round]Certificate)
l.nextRound = 1
- l.state = make(map[basics.Address]basics.AccountData)
- for k, v := range state {
- l.state[k] = v
- }
+ l.state = maps.Clone(state)
l.notifications = make(map[basics.Round]signal)
@@ -226,10 +224,7 @@ func makeTestLedgerWithConsensusVersion(state map[basics.Address]basics.AccountD
l.certs = make(map[basics.Round]Certificate)
l.nextRound = 1
- l.state = make(map[basics.Address]basics.AccountData)
- for k, v := range state {
- l.state[k] = v
- }
+ l.state = maps.Clone(state)
l.notifications = make(map[basics.Round]signal)
@@ -245,10 +240,7 @@ func makeTestLedgerMaxBlocks(state map[basics.Address]basics.AccountData, maxNum
l.maxNumBlocks = maxNumBlocks
- l.state = make(map[basics.Address]basics.AccountData)
- for k, v := range state {
- l.state[k] = v
- }
+ l.state = maps.Clone(state)
l.notifications = make(map[basics.Round]signal)
diff --git a/agreement/cryptoRequestContext.go b/agreement/cryptoRequestContext.go
index 8807e3f57..a9c512a51 100644
--- a/agreement/cryptoRequestContext.go
+++ b/agreement/cryptoRequestContext.go
@@ -47,6 +47,7 @@ type roundRequestsContext struct {
}
// pendingRequests keeps the context for all pending requests
+//
//msgp:ignore pendingRequestsContext
type pendingRequestsContext map[round]roundRequestsContext
diff --git a/agreement/demux.go b/agreement/demux.go
index 33584490f..f31c4d075 100644
--- a/agreement/demux.go
+++ b/agreement/demux.go
@@ -125,9 +125,9 @@ func (d *demux) tokenizeMessages(ctx context.Context, net Network, tag protocol.
if err != nil {
warnMsg := fmt.Sprintf("disconnecting from peer: error decoding message tagged %v: %v", tag, err)
// check protocol version
- cv, err := d.ledger.ConsensusVersion(d.ledger.NextRound())
- if err == nil {
- if _, ok := config.Consensus[cv]; !ok {
+ cv, cvErr := d.ledger.ConsensusVersion(d.ledger.NextRound())
+ if cvErr == nil {
+ if _, found := config.Consensus[cv]; !found {
warnMsg = fmt.Sprintf("received proposal message was ignored. The node binary doesn't support the next network consensus (%v) and would no longer be able to process agreement messages", cv)
}
}
diff --git a/agreement/doc.go b/agreement/doc.go
index 0d4ae97af..fbf3ee433 100644
--- a/agreement/doc.go
+++ b/agreement/doc.go
@@ -23,20 +23,27 @@
//
// Clients instantiate an Service by providing it several
// parameters:
-// - Ledger represents a data store which supports the reading and
-// writing of data stored within Blocks.
-// - BlockFactory produces Blocks for a given round.
-// - BlockValidator validates Blocks for a given round.
-// - KeyManager holds the participation keys necessary to participate
-// in the protocol.
-// - Network provides an abstraction over the underlying network.
-// - timers.Clock provides timekeeping services for timeouts.
-// - db.Accessor provides persistent storage for internal state.
-//
-// Blocks for which consensus is completed are written using
-// Ledger.EnsureBlock alongside Certificate objects, which are
-// cryptographic proofs that a Block was confirmed for a given
-// round.
+//
+// - Ledger represents a data store which supports the reading and
+// writing of data stored within Blocks.
+//
+// - BlockFactory produces Blocks for a given round.
+//
+// - BlockValidator validates Blocks for a given round.
+//
+// - KeyManager holds the participation keys necessary to participate
+// in the protocol.
+//
+// - Network provides an abstraction over the underlying network.
+//
+// - timers.Clock provides timekeeping services for timeouts.
+//
+// - db.Accessor provides persistent storage for internal state.
+//
+// Blocks for which consensus is completed are written using
+// Ledger.EnsureBlock alongside Certificate objects, which are
+// cryptographic proofs that a Block was confirmed for a given
+// round.
//
// If Ledger and db.Accessor provide crash-safe storage, agreement
// will also recover safely after crashes.
diff --git a/agreement/events.go b/agreement/events.go
index a4c717cbd..52737e5f2 100644
--- a/agreement/events.go
+++ b/agreement/events.go
@@ -669,12 +669,12 @@ func (e thresholdEvent) ComparableStr() string {
//
// The ordering is given as follows:
//
-// - certThreshold events are fresher than all other non-certThreshold events.
-// - Events from a later period are fresher than events from an older period.
-// - nextThreshold events are fresher than softThreshold events from the same
-// period.
-// - nextThreshold events for the bottom proposal-value are fresher than
-// nextThreshold events for some other value.
+// - certThreshold events are fresher than all other non-certThreshold events.
+// - Events from a later period are fresher than events from an older period.
+// - nextThreshold events are fresher than softThreshold events from the same
+// period.
+// - nextThreshold events for the bottom proposal-value are fresher than
+// nextThreshold events for some other value.
//
// Precondition: e.Round == o.Round if e.T != none and o.T != none
func (e thresholdEvent) fresherThan(o thresholdEvent) bool {
diff --git a/agreement/msgp_gen.go b/agreement/msgp_gen.go
index bf1c46f98..ae4e513b0 100644
--- a/agreement/msgp_gen.go
+++ b/agreement/msgp_gen.go
@@ -8,8 +8,10 @@ import (
"github.com/algorand/msgp/msgp"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/committee"
"github.com/algorand/go-algorand/protocol"
)
@@ -21,6 +23,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> CertificateMaxSize()
//
// ConsensusVersionView
// |-----> (*) MarshalMsg
@@ -29,6 +32,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ConsensusVersionViewMaxSize()
//
// actionType
// |-----> MarshalMsg
@@ -37,6 +41,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> ActionTypeMaxSize()
//
// blockAssembler
// |-----> (*) MarshalMsg
@@ -45,6 +50,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> BlockAssemblerMaxSize()
//
// bundle
// |-----> (*) MarshalMsg
@@ -53,6 +59,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> BundleMaxSize()
//
// compoundMessage
// |-----> (*) MarshalMsg
@@ -61,6 +68,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> CompoundMessageMaxSize()
//
// diskState
// |-----> (*) MarshalMsg
@@ -69,6 +77,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> DiskStateMaxSize()
//
// equivocationVote
// |-----> (*) MarshalMsg
@@ -77,6 +86,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> EquivocationVoteMaxSize()
//
// equivocationVoteAuthenticator
// |-----> (*) MarshalMsg
@@ -85,6 +95,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> EquivocationVoteAuthenticatorMaxSize()
//
// eventType
// |-----> MarshalMsg
@@ -93,6 +104,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> EventTypeMaxSize()
//
// freshnessData
// |-----> (*) MarshalMsg
@@ -101,6 +113,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> FreshnessDataMaxSize()
//
// message
// |-----> (*) MarshalMsg
@@ -109,6 +122,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> MessageMaxSize()
//
// messageEvent
// |-----> (*) MarshalMsg
@@ -117,6 +131,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> MessageEventMaxSize()
//
// nextThresholdStatusEvent
// |-----> (*) MarshalMsg
@@ -125,6 +140,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> NextThresholdStatusEventMaxSize()
//
// period
// |-----> MarshalMsg
@@ -133,6 +149,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> PeriodMaxSize()
//
// periodRouter
// |-----> (*) MarshalMsg
@@ -141,6 +158,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> PeriodRouterMaxSize()
//
// player
// |-----> (*) MarshalMsg
@@ -149,6 +167,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> PlayerMaxSize()
//
// proposal
// |-----> (*) MarshalMsg
@@ -157,6 +176,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProposalMaxSize()
//
// proposalManager
// |-----> (*) MarshalMsg
@@ -165,6 +185,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProposalManagerMaxSize()
//
// proposalSeeker
// |-----> (*) MarshalMsg
@@ -173,6 +194,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProposalSeekerMaxSize()
//
// proposalStore
// |-----> (*) MarshalMsg
@@ -181,6 +203,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProposalStoreMaxSize()
//
// proposalTable
// |-----> (*) MarshalMsg
@@ -189,6 +212,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProposalTableMaxSize()
//
// proposalTracker
// |-----> (*) MarshalMsg
@@ -197,6 +221,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProposalTrackerMaxSize()
//
// proposalTrackerContract
// |-----> (*) MarshalMsg
@@ -205,6 +230,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProposalTrackerContractMaxSize()
//
// proposalValue
// |-----> (*) MarshalMsg
@@ -213,6 +239,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProposalValueMaxSize()
//
// proposalVoteCounter
// |-----> (*) MarshalMsg
@@ -221,6 +248,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProposalVoteCounterMaxSize()
//
// proposerSeed
// |-----> (*) MarshalMsg
@@ -229,6 +257,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProposerSeedMaxSize()
//
// rawVote
// |-----> (*) MarshalMsg
@@ -237,6 +266,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> RawVoteMaxSize()
//
// rootRouter
// |-----> (*) MarshalMsg
@@ -245,6 +275,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> RootRouterMaxSize()
//
// roundRouter
// |-----> (*) MarshalMsg
@@ -253,6 +284,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> RoundRouterMaxSize()
//
// seedInput
// |-----> (*) MarshalMsg
@@ -261,6 +293,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SeedInputMaxSize()
//
// selector
// |-----> (*) MarshalMsg
@@ -269,6 +302,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SelectorMaxSize()
//
// serializableError
// |-----> MarshalMsg
@@ -277,6 +311,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> SerializableErrorMaxSize()
//
// step
// |-----> MarshalMsg
@@ -285,6 +320,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> StepMaxSize()
//
// stepRouter
// |-----> (*) MarshalMsg
@@ -293,6 +329,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> StepRouterMaxSize()
//
// thresholdEvent
// |-----> (*) MarshalMsg
@@ -301,6 +338,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ThresholdEventMaxSize()
//
// transmittedPayload
// |-----> (*) MarshalMsg
@@ -309,6 +347,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> TransmittedPayloadMaxSize()
//
// unauthenticatedBundle
// |-----> (*) MarshalMsg
@@ -317,6 +356,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> UnauthenticatedBundleMaxSize()
//
// unauthenticatedEquivocationVote
// |-----> (*) MarshalMsg
@@ -325,6 +365,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> UnauthenticatedEquivocationVoteMaxSize()
//
// unauthenticatedProposal
// |-----> (*) MarshalMsg
@@ -333,6 +374,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> UnauthenticatedProposalMaxSize()
//
// unauthenticatedVote
// |-----> (*) MarshalMsg
@@ -341,6 +383,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> UnauthenticatedVoteMaxSize()
//
// vote
// |-----> (*) MarshalMsg
@@ -349,6 +392,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VoteMaxSize()
//
// voteAggregator
// |-----> (*) MarshalMsg
@@ -357,6 +401,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VoteAggregatorMaxSize()
//
// voteAuthenticator
// |-----> (*) MarshalMsg
@@ -365,6 +410,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VoteAuthenticatorMaxSize()
//
// voteTracker
// |-----> (*) MarshalMsg
@@ -373,6 +419,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VoteTrackerMaxSize()
//
// voteTrackerContract
// |-----> (*) MarshalMsg
@@ -381,6 +428,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VoteTrackerContractMaxSize()
//
// voteTrackerPeriod
// |-----> (*) MarshalMsg
@@ -389,6 +437,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VoteTrackerPeriodMaxSize()
//
// voteTrackerRound
// |-----> (*) MarshalMsg
@@ -397,6 +446,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VoteTrackerRoundMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -741,6 +791,17 @@ func (z *Certificate) MsgIsZero() bool {
return ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0) && ((*z).Proposal.MsgIsZero()) && (len((*z).Votes) == 0) && (len((*z).EquivocationVotes) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func CertificateMaxSize() (s int) {
+ s = 1 + 4 + basics.RoundMaxSize() + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + ProposalValueMaxSize() + 5
+ // Calculating size of slice: z.Votes
+ s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (VoteAuthenticatorMaxSize()))
+ s += 4
+ // Calculating size of slice: z.EquivocationVotes
+ s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (EquivocationVoteAuthenticatorMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *ConsensusVersionView) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -892,6 +953,14 @@ func (z *ConsensusVersionView) MsgIsZero() bool {
return ((*z).Err == nil) && ((*z).Version.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func ConsensusVersionViewMaxSize() (s int) {
+ s = 1 + 4
+ panic("Unable to determine max size: String type string(*z.Err) is unbounded")
+ s += 8 + protocol.ConsensusVersionMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z actionType) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -938,6 +1007,12 @@ func (z actionType) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func ActionTypeMaxSize() (s int) {
+ s = msgp.Uint8Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *blockAssembler) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1142,6 +1217,14 @@ func (z *blockAssembler) MsgIsZero() bool {
return ((*z).Pipeline.MsgIsZero()) && ((*z).Filled == false) && ((*z).Payload.MsgIsZero()) && ((*z).Assembled == false) && (len((*z).Authenticators) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func BlockAssemblerMaxSize() (s int) {
+ s = 1 + 9 + UnauthenticatedProposalMaxSize() + 7 + msgp.BoolSize + 8 + ProposalMaxSize() + 10 + msgp.BoolSize + 15
+ // Calculating size of slice: z.Authenticators
+ panic("Slice z.Authenticators is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *bundle) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1399,6 +1482,17 @@ func (z *bundle) MsgIsZero() bool {
return ((*z).U.MsgIsZero()) && (len((*z).Votes) == 0) && (len((*z).EquivocationVotes) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func BundleMaxSize() (s int) {
+ s = 1 + 2 + UnauthenticatedBundleMaxSize() + 5
+ // Calculating size of slice: z.Votes
+ s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (VoteMaxSize()))
+ s += 4
+ // Calculating size of slice: z.EquivocationVotes
+ s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (EquivocationVoteMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *compoundMessage) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1510,6 +1604,12 @@ func (z *compoundMessage) MsgIsZero() bool {
return ((*z).Vote.MsgIsZero()) && ((*z).Proposal.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func CompoundMessageMaxSize() (s int) {
+ s = 1 + 5 + UnauthenticatedVoteMaxSize() + 9 + UnauthenticatedProposalMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *diskState) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1761,6 +1861,23 @@ func (z *diskState) MsgIsZero() bool {
return (len((*z).Router) == 0) && (len((*z).Player) == 0) && (len((*z).Clock) == 0) && (len((*z).ActionTypes) == 0) && (len((*z).Actions) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func DiskStateMaxSize() (s int) {
+ s = 1 + 7
+ panic("Unable to determine max size: Byteslice type z.Router is unbounded")
+ s += 7
+ panic("Unable to determine max size: Byteslice type z.Player is unbounded")
+ s += 6
+ panic("Unable to determine max size: Byteslice type z.Clock is unbounded")
+ s += 12
+ // Calculating size of slice: z.ActionTypes
+ panic("Slice z.ActionTypes is unbounded")
+ s += 8
+ // Calculating size of slice: z.Actions
+ panic("Slice z.Actions is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *equivocationVote) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2082,6 +2199,17 @@ func (z *equivocationVote) MsgIsZero() bool {
return ((*z).Sender.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0) && ((*z).Cred.MsgIsZero()) && (((*z).Proposals[0].MsgIsZero()) && ((*z).Proposals[1].MsgIsZero())) && (((*z).Sigs[0].MsgIsZero()) && ((*z).Sigs[1].MsgIsZero()))
}
+// MaxSize returns a maximum valid message size for this message type
+func EquivocationVoteMaxSize() (s int) {
+ s = 1 + 4 + basics.AddressMaxSize() + 4 + basics.RoundMaxSize() + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + committee.CredentialMaxSize() + 6
+ // Calculating size of array: z.Proposals
+ s += msgp.ArrayHeaderSize + ((2) * (ProposalValueMaxSize()))
+ s += 5
+ // Calculating size of array: z.Sigs
+ s += msgp.ArrayHeaderSize + ((2) * (crypto.OneTimeSignatureMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *equivocationVoteAuthenticator) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2293,6 +2421,17 @@ func (z *equivocationVoteAuthenticator) MsgIsZero() bool {
return ((*z).Sender.MsgIsZero()) && ((*z).Cred.MsgIsZero()) && (((*z).Sigs[0].MsgIsZero()) && ((*z).Sigs[1].MsgIsZero())) && (((*z).Proposals[0].MsgIsZero()) && ((*z).Proposals[1].MsgIsZero()))
}
+// MaxSize returns a maximum valid message size for this message type
+func EquivocationVoteAuthenticatorMaxSize() (s int) {
+ s = 1 + 4 + basics.AddressMaxSize() + 5 + committee.UnauthenticatedCredentialMaxSize() + 4
+ // Calculating size of array: z.Sigs
+ s += msgp.ArrayHeaderSize + ((2) * (crypto.OneTimeSignatureMaxSize()))
+ s += 6
+ // Calculating size of array: z.Proposals
+ s += msgp.ArrayHeaderSize + ((2) * (ProposalValueMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z eventType) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2339,6 +2478,12 @@ func (z eventType) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func EventTypeMaxSize() (s int) {
+ s = msgp.Uint8Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *freshnessData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2508,6 +2653,12 @@ func (z *freshnessData) MsgIsZero() bool {
return ((*z).PlayerRound.MsgIsZero()) && ((*z).PlayerPeriod == 0) && ((*z).PlayerStep == 0) && ((*z).PlayerLastConcluding == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func FreshnessDataMaxSize() (s int) {
+ s = 1 + 12 + basics.RoundMaxSize() + 13 + msgp.Uint64Size + 11 + msgp.Uint64Size + 21 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *message) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2872,6 +3023,14 @@ func (z *message) MsgIsZero() bool {
return ((*z).MessageHandle.MsgIsZero()) && ((*z).Tag.MsgIsZero()) && ((*z).Vote.MsgIsZero()) && ((*z).Proposal.MsgIsZero()) && ((*z).Bundle.MsgIsZero()) && ((*z).UnauthenticatedVote.MsgIsZero()) && ((*z).UnauthenticatedProposal.MsgIsZero()) && ((*z).UnauthenticatedBundle.MsgIsZero()) && (((*z).CompoundMessage.Vote.MsgIsZero()) && ((*z).CompoundMessage.Proposal.MsgIsZero()))
}
+// MaxSize returns a maximum valid message size for this message type
+func MessageMaxSize() (s int) {
+ s = 1 + 14
+ panic("Unable to determine max size: MaxSize() not implemented for Raw type")
+ s += 4 + protocol.TagMaxSize() + 5 + VoteMaxSize() + 9 + ProposalMaxSize() + 7 + BundleMaxSize() + 20 + UnauthenticatedVoteMaxSize() + 24 + UnauthenticatedProposalMaxSize() + 22 + UnauthenticatedBundleMaxSize() + 16 + 1 + 5 + UnauthenticatedVoteMaxSize() + 9 + UnauthenticatedProposalMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *messageEvent) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3148,6 +3307,16 @@ func (z *messageEvent) MsgIsZero() bool {
return ((*z).T == 0) && ((*z).Input.MsgIsZero()) && ((*z).Err == nil) && ((*z).TaskIndex == 0) && ((*z).Tail == nil) && ((*z).Cancelled == false) && ((*z).Proto.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func MessageEventMaxSize() (s int) {
+ s = 1 + 2 + msgp.Uint8Size + 6 + MessageMaxSize() + 4
+ panic("Unable to determine max size: String type string(*z.Err) is unbounded")
+ s += 10 + msgp.Uint64Size + 5
+ s += MessageEventMaxSize()
+ s += 10 + msgp.BoolSize + 6 + ConsensusVersionViewMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *nextThresholdStatusEvent) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3259,6 +3428,12 @@ func (z *nextThresholdStatusEvent) MsgIsZero() bool {
return ((*z).Bottom == false) && ((*z).Proposal.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func NextThresholdStatusEventMaxSize() (s int) {
+ s = 1 + 7 + msgp.BoolSize + 9 + ProposalValueMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z period) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3305,6 +3480,12 @@ func (z period) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func PeriodMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *periodRouter) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3549,6 +3730,14 @@ func (z *periodRouter) MsgIsZero() bool {
return ((*z).ProposalTracker.MsgIsZero()) && ((*z).VoteTrackerPeriod.MsgIsZero()) && ((*z).ProposalTrackerContract.MsgIsZero()) && (len((*z).Children) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func PeriodRouterMaxSize() (s int) {
+ s = 1 + 16 + ProposalTrackerMaxSize() + 18 + VoteTrackerPeriodMaxSize() + 24 + ProposalTrackerContractMaxSize() + 9
+ s += msgp.MapHeaderSize
+ panic("Map z.Children is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *player) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3786,6 +3975,12 @@ func (z *player) MsgIsZero() bool {
return ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0) && ((*z).LastConcluding == 0) && ((*z).Deadline == 0) && ((*z).Napping == false) && ((*z).FastRecoveryDeadline == 0) && ((*z).Pending.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func PlayerMaxSize() (s int) {
+ s = 1 + 6 + basics.RoundMaxSize() + 7 + msgp.Uint64Size + 5 + msgp.Uint64Size + 15 + msgp.Uint64Size + 9 + msgp.DurationSize + 8 + msgp.BoolSize + 21 + msgp.DurationSize + 8 + ProposalTableMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *proposal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4150,6 +4345,16 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "GenesisID")
+ return
+ }
+ if zb0006 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
@@ -4286,27 +4491,27 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0006 > protocol.NumStateProofTypes {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumStateProofTypes))
+ if zb0007 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(protocol.NumStateProofTypes))
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0007 {
+ if zb0008 {
(*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0006)
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0007)
}
- for zb0006 > 0 {
+ for zb0007 > 0 {
var zb0001 protocol.StateProofType
var zb0002 bookkeeping.StateProofTrackingData
- zb0006--
+ zb0007--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
@@ -4322,24 +4527,24 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
return
}
- if zb0008 > config.MaxProposedExpiredOnlineAccounts {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ if zb0009 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxProposedExpiredOnlineAccounts))
err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
return
}
- if zb0009 {
+ if zb0010 {
(*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
- } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
- (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0009 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0009]
} else {
- (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0009)
}
for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
@@ -4368,13 +4573,13 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
if zb0004 > 0 {
zb0004--
{
- var zb0010 uint64
- zb0010, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0011 uint64
+ zb0011, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0010)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0011)
}
}
if zb0004 > 0 {
@@ -4445,6 +4650,16 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "gen":
+ var zb0012 int
+ zb0012, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "GenesisID")
+ return
+ }
+ if zb0012 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "GenesisID")
@@ -4547,27 +4762,27 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "spt":
- var zb0011 int
- var zb0012 bool
- zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0011 > protocol.NumStateProofTypes {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumStateProofTypes))
+ if zb0013 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(protocol.NumStateProofTypes))
err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0012 {
+ if zb0014 {
(*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0011)
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0013)
}
- for zb0011 > 0 {
+ for zb0013 > 0 {
var zb0001 protocol.StateProofType
var zb0002 bookkeeping.StateProofTrackingData
- zb0011--
+ zb0013--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "StateProofTracking")
@@ -4581,24 +4796,24 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking[zb0001] = zb0002
}
case "partupdrmv":
- var zb0013 int
- var zb0014 bool
- zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ExpiredParticipationAccounts")
return
}
- if zb0013 > config.MaxProposedExpiredOnlineAccounts {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxProposedExpiredOnlineAccounts))
+ if zb0015 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0015), uint64(config.MaxProposedExpiredOnlineAccounts))
err = msgp.WrapError(err, "ExpiredParticipationAccounts")
return
}
- if zb0014 {
+ if zb0016 {
(*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
- } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0013 {
- (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0013]
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0015 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0015]
} else {
- (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0013)
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0015)
}
for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
@@ -4621,13 +4836,13 @@ func (z *proposal) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
case "oper":
{
- var zb0015 uint64
- zb0015, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0017 uint64
+ zb0017, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0015)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0017)
}
case "oprop":
bts, err = (*z).unauthenticatedProposal.OriginalProposer.UnmarshalMsg(bts)
@@ -4676,6 +4891,24 @@ func (z *proposal) MsgIsZero() bool {
return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && 
(len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func ProposalMaxSize() (s int) {
+ s = 3 + 4 + basics.RoundMaxSize() + 5 + bookkeeping.BlockHashMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + msgp.Uint64Size + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.unauthenticatedProposal.Block.BlockHeader.StateProofTracking
+ s += protocol.NumStateProofTypes * (protocol.StateProofTypeMaxSize())
+ // Adding size of map values for z.unauthenticatedProposal.Block.BlockHeader.StateProofTracking
+ s += protocol.NumStateProofTypes * (bookkeeping.StateProofTrackingDataMaxSize())
+ s += 11
+ // Calculating size of slice: z.unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts
+ s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize()))
+ s += 5
+ // Using maxtotalbytes for: z.unauthenticatedProposal.Block.Payset
+ s += config.MaxTxnBytesPerBlock
+ s += 5 + crypto.VrfProofMaxSize() + 5 + msgp.Uint64Size + 6 + basics.AddressMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *proposalManager) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4754,6 +4987,12 @@ func (z *proposalManager) MsgIsZero() bool {
return true
}
+// MaxSize returns a maximum valid message size for this message type
+func ProposalManagerMaxSize() (s int) {
+ s = 1
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *proposalSeeker) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4882,6 +5121,12 @@ func (z *proposalSeeker) MsgIsZero() bool {
return ((*z).Lowest.MsgIsZero()) && ((*z).Filled == false) && ((*z).Frozen == false)
}
+// MaxSize returns a maximum valid message size for this message type
+func ProposalSeekerMaxSize() (s int) {
+ s = 1 + 7 + VoteMaxSize() + 7 + msgp.BoolSize + 7 + msgp.BoolSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *proposalStore) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5147,6 +5392,17 @@ func (z *proposalStore) MsgIsZero() bool {
return (len((*z).Relevant) == 0) && ((*z).Pinned.MsgIsZero()) && (len((*z).Assemblers) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func ProposalStoreMaxSize() (s int) {
+ s = 1 + 9
+ s += msgp.MapHeaderSize
+ panic("Map z.Relevant is unbounded")
+ s += 7 + ProposalValueMaxSize() + 11
+ s += msgp.MapHeaderSize
+ panic("Map z.Assemblers is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *proposalTable) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5376,6 +5632,15 @@ func (z *proposalTable) MsgIsZero() bool {
return (len((*z).Pending) == 0) && ((*z).PendingNext == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func ProposalTableMaxSize() (s int) {
+ s = 1 + 8
+ s += msgp.MapHeaderSize
+ panic("Map z.Pending is unbounded")
+ s += 12 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *proposalTracker) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5573,6 +5838,15 @@ func (z *proposalTracker) MsgIsZero() bool {
return (len((*z).Duplicate) == 0) && ((*z).Freezer.MsgIsZero()) && ((*z).Staging.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func ProposalTrackerMaxSize() (s int) {
+ s = 1 + 10
+ s += msgp.MapHeaderSize
+ panic("Map z.Duplicate is unbounded")
+ s += 8 + ProposalSeekerMaxSize() + 8 + ProposalValueMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *proposalTrackerContract) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5718,6 +5992,12 @@ func (z *proposalTrackerContract) MsgIsZero() bool {
return ((*z).SawOneVote == false) && ((*z).Froze == false) && ((*z).SawSoftThreshold == false) && ((*z).SawCertThreshold == false)
}
+// MaxSize returns a maximum valid message size for this message type
+func ProposalTrackerContractMaxSize() (s int) {
+ s = 1 + 11 + msgp.BoolSize + 6 + msgp.BoolSize + 17 + msgp.BoolSize + 17 + msgp.BoolSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *proposalValue) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5901,6 +6181,12 @@ func (z *proposalValue) MsgIsZero() bool {
return ((*z).OriginalPeriod == 0) && ((*z).OriginalProposer.MsgIsZero()) && ((*z).BlockDigest.MsgIsZero()) && ((*z).EncodingDigest.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func ProposalValueMaxSize() (s int) {
+ s = 1 + 5 + msgp.Uint64Size + 6 + basics.AddressMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *proposalVoteCounter) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -6080,6 +6366,14 @@ func (z *proposalVoteCounter) MsgIsZero() bool {
return ((*z).Count == 0) && (len((*z).Votes) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func ProposalVoteCounterMaxSize() (s int) {
+ s = 1 + 6 + msgp.Uint64Size + 6
+ s += msgp.MapHeaderSize
+ panic("Map z.Votes is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *proposerSeed) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -6191,6 +6485,12 @@ func (z *proposerSeed) MsgIsZero() bool {
return ((*z).Addr.MsgIsZero()) && ((*z).VRF.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func ProposerSeedMaxSize() (s int) {
+ s = 1 + 5 + basics.AddressMaxSize() + 4 + crypto.VrfOutputMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *rawVote) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -6405,6 +6705,12 @@ func (z *rawVote) MsgIsZero() bool {
return ((*z).Sender.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0) && ((*z).Proposal.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func RawVoteMaxSize() (s int) {
+ s = 1 + 4 + basics.AddressMaxSize() + 4 + basics.RoundMaxSize() + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + ProposalValueMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *rootRouter) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -6416,7 +6722,7 @@ func (z *rootRouter) MarshalMsg(b []byte) (o []byte) {
} else {
o = msgp.AppendMapHeader(o, uint32(len((*z).Children)))
}
- zb0001_keys := make([]round, 0, len((*z).Children))
+ zb0001_keys := make([]basics.Round, 0, len((*z).Children))
for zb0001 := range (*z).Children {
zb0001_keys = append(zb0001_keys, zb0001)
}
@@ -6563,7 +6869,7 @@ func (z *rootRouter) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).Children = make(map[round]*roundRouter, zb0009)
}
for zb0009 > 0 {
- var zb0001 round
+ var zb0001 basics.Round
var zb0002 *roundRouter
zb0009--
bts, err = zb0001.UnmarshalMsg(bts)
@@ -6711,7 +7017,7 @@ func (z *rootRouter) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).Children = make(map[round]*roundRouter, zb0015)
}
for zb0015 > 0 {
- var zb0001 round
+ var zb0001 basics.Round
var zb0002 *roundRouter
zb0015--
bts, err = zb0001.UnmarshalMsg(bts)
@@ -6778,6 +7084,14 @@ func (z *rootRouter) MsgIsZero() bool {
return (len((*z).Children) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func RootRouterMaxSize() (s int) {
+ s = 1 + 16 + 1 + 15 + 1 + 9
+ s += msgp.MapHeaderSize
+ panic("Map z.Children is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *roundRouter) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -7139,6 +7453,14 @@ func (z *roundRouter) MsgIsZero() bool {
return ((*z).ProposalStore.MsgIsZero()) && (((*z).VoteTrackerRound.Freshest.MsgIsZero()) && ((*z).VoteTrackerRound.Ok == false)) && (len((*z).Children) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func RoundRouterMaxSize() (s int) {
+ s = 1 + 14 + ProposalStoreMaxSize() + 17 + 1 + 9 + ThresholdEventMaxSize() + 3 + msgp.BoolSize + 9
+ s += msgp.MapHeaderSize
+ panic("Map z.Children is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *seedInput) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -7250,6 +7572,12 @@ func (z *seedInput) MsgIsZero() bool {
return ((*z).Alpha.MsgIsZero()) && ((*z).History.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func SeedInputMaxSize() (s int) {
+ s = 1 + 6 + crypto.DigestMaxSize() + 5 + crypto.DigestMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *selector) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -7411,6 +7739,12 @@ func (z *selector) MsgIsZero() bool {
return ((*z).Seed.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func SelectorMaxSize() (s int) {
+ s = 1 + 5 + committee.SeedMaxSize() + 4 + basics.RoundMaxSize() + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z serializableError) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -7457,6 +7791,12 @@ func (z serializableError) MsgIsZero() bool {
return z == ""
}
+// MaxSize returns a maximum valid message size for this message type
+func SerializableErrorMaxSize() (s int) {
+ panic("Unable to determine max size: String type string(z) is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z step) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -7503,6 +7843,12 @@ func (z step) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func StepMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *stepRouter) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -7614,6 +7960,12 @@ func (z *stepRouter) MsgIsZero() bool {
return ((*z).VoteTracker.MsgIsZero()) && ((*z).VoteTrackerContract.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func StepRouterMaxSize() (s int) {
+ s = 1 + 12 + VoteTrackerMaxSize() + 20 + VoteTrackerContractMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *thresholdEvent) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -7834,6 +8186,12 @@ func (z *thresholdEvent) MsgIsZero() bool {
return ((*z).T == 0) && ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0) && ((*z).Proposal.MsgIsZero()) && ((*z).Bundle.MsgIsZero()) && ((*z).Proto.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func ThresholdEventMaxSize() (s int) {
+ s = 1 + 2 + msgp.Uint8Size + 6 + basics.RoundMaxSize() + 7 + msgp.Uint64Size + 5 + msgp.Uint64Size + 9 + ProposalValueMaxSize() + 7 + UnauthenticatedBundleMaxSize() + 6 + protocol.ConsensusVersionMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *transmittedPayload) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -8207,6 +8565,16 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "GenesisID")
+ return
+ }
+ if zb0006 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
@@ -8343,27 +8711,27 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0006 > protocol.NumStateProofTypes {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumStateProofTypes))
+ if zb0007 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(protocol.NumStateProofTypes))
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0007 {
+ if zb0008 {
(*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0006)
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0007)
}
- for zb0006 > 0 {
+ for zb0007 > 0 {
var zb0001 protocol.StateProofType
var zb0002 bookkeeping.StateProofTrackingData
- zb0006--
+ zb0007--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
@@ -8379,24 +8747,24 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
return
}
- if zb0008 > config.MaxProposedExpiredOnlineAccounts {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ if zb0009 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxProposedExpiredOnlineAccounts))
err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
return
}
- if zb0009 {
+ if zb0010 {
(*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
- } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
- (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0009 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0009]
} else {
- (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0009)
}
for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
@@ -8425,13 +8793,13 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
if zb0004 > 0 {
zb0004--
{
- var zb0010 uint64
- zb0010, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0011 uint64
+ zb0011, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0010)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0011)
}
}
if zb0004 > 0 {
@@ -8510,6 +8878,16 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "gen":
+ var zb0012 int
+ zb0012, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "GenesisID")
+ return
+ }
+ if zb0012 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).unauthenticatedProposal.Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "GenesisID")
@@ -8612,27 +8990,27 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "spt":
- var zb0011 int
- var zb0012 bool
- zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0011 > protocol.NumStateProofTypes {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumStateProofTypes))
+ if zb0013 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(protocol.NumStateProofTypes))
err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0012 {
+ if zb0014 {
(*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = nil
} else if (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking == nil {
- (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0011)
+ (*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0013)
}
- for zb0011 > 0 {
+ for zb0013 > 0 {
var zb0001 protocol.StateProofType
var zb0002 bookkeeping.StateProofTrackingData
- zb0011--
+ zb0013--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "StateProofTracking")
@@ -8646,24 +9024,24 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking[zb0001] = zb0002
}
case "partupdrmv":
- var zb0013 int
- var zb0014 bool
- zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ExpiredParticipationAccounts")
return
}
- if zb0013 > config.MaxProposedExpiredOnlineAccounts {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxProposedExpiredOnlineAccounts))
+ if zb0015 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0015), uint64(config.MaxProposedExpiredOnlineAccounts))
err = msgp.WrapError(err, "ExpiredParticipationAccounts")
return
}
- if zb0014 {
+ if zb0016 {
(*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
- } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0013 {
- (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0013]
+ } else if (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0015 {
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0015]
} else {
- (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0013)
+ (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0015)
}
for zb0003 := range (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
bts, err = (*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
@@ -8686,13 +9064,13 @@ func (z *transmittedPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
case "oper":
{
- var zb0015 uint64
- zb0015, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0017 uint64
+ zb0017, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OriginalPeriod")
return
}
- (*z).unauthenticatedProposal.OriginalPeriod = period(zb0015)
+ (*z).unauthenticatedProposal.OriginalPeriod = period(zb0017)
}
case "oprop":
bts, err = (*z).unauthenticatedProposal.OriginalProposer.UnmarshalMsg(bts)
@@ -8747,6 +9125,24 @@ func (z *transmittedPayload) MsgIsZero() bool {
return ((*z).unauthenticatedProposal.Block.BlockHeader.Round.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Branch.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.Seed.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.TimeStamp == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisID == "") && ((*z).unauthenticatedProposal.Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).unauthenticatedProposal.Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).unauthenticatedProposal.Block.BlockHeader.TxnCounter == 0) && 
(len((*z).unauthenticatedProposal.Block.BlockHeader.StateProofTracking) == 0) && (len((*z).unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).unauthenticatedProposal.Block.Payset.MsgIsZero()) && ((*z).unauthenticatedProposal.SeedProof.MsgIsZero()) && ((*z).unauthenticatedProposal.OriginalPeriod == 0) && ((*z).unauthenticatedProposal.OriginalProposer.MsgIsZero()) && ((*z).PriorVote.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func TransmittedPayloadMaxSize() (s int) {
+ s = 3 + 4 + basics.RoundMaxSize() + 5 + bookkeeping.BlockHashMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + msgp.Uint64Size + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.unauthenticatedProposal.Block.BlockHeader.StateProofTracking
+ s += protocol.NumStateProofTypes * (protocol.StateProofTypeMaxSize())
+ // Adding size of map values for z.unauthenticatedProposal.Block.BlockHeader.StateProofTracking
+ s += protocol.NumStateProofTypes * (bookkeeping.StateProofTrackingDataMaxSize())
+ s += 11
+ // Calculating size of slice: z.unauthenticatedProposal.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts
+ s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize()))
+ s += 5
+ // Using maxtotalbytes for: z.unauthenticatedProposal.Block.Payset
+ s += config.MaxTxnBytesPerBlock
+ s += 5 + crypto.VrfProofMaxSize() + 5 + msgp.Uint64Size + 6 + basics.AddressMaxSize() + 3 + UnauthenticatedVoteMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *unauthenticatedBundle) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -9089,6 +9485,17 @@ func (z *unauthenticatedBundle) MsgIsZero() bool {
return ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0) && ((*z).Proposal.MsgIsZero()) && (len((*z).Votes) == 0) && (len((*z).EquivocationVotes) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func UnauthenticatedBundleMaxSize() (s int) {
+ s = 1 + 4 + basics.RoundMaxSize() + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + ProposalValueMaxSize() + 5
+ // Calculating size of slice: z.Votes
+ s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (VoteAuthenticatorMaxSize()))
+ s += 4
+ // Calculating size of slice: z.EquivocationVotes
+ s += msgp.ArrayHeaderSize + ((config.MaxVoteThreshold) * (EquivocationVoteAuthenticatorMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *unauthenticatedEquivocationVote) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -9410,6 +9817,17 @@ func (z *unauthenticatedEquivocationVote) MsgIsZero() bool {
return ((*z).Sender.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).Period == 0) && ((*z).Step == 0) && ((*z).Cred.MsgIsZero()) && (((*z).Proposals[0].MsgIsZero()) && ((*z).Proposals[1].MsgIsZero())) && (((*z).Sigs[0].MsgIsZero()) && ((*z).Sigs[1].MsgIsZero()))
}
+// MaxSize returns a maximum valid message size for this message type
+func UnauthenticatedEquivocationVoteMaxSize() (s int) {
+ s = 1 + 4 + basics.AddressMaxSize() + 4 + basics.RoundMaxSize() + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + committee.UnauthenticatedCredentialMaxSize() + 6
+ // Calculating size of array: z.Proposals
+ s += msgp.ArrayHeaderSize + ((2) * (ProposalValueMaxSize()))
+ s += 5
+ // Calculating size of array: z.Sigs
+ s += msgp.ArrayHeaderSize + ((2) * (crypto.OneTimeSignatureMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *unauthenticatedProposal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -9774,6 +10192,16 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
}
if zb0004 > 0 {
zb0004--
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "GenesisID")
+ return
+ }
+ if zb0006 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
@@ -9910,27 +10338,27 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
}
if zb0004 > 0 {
zb0004--
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0006 > protocol.NumStateProofTypes {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumStateProofTypes))
+ if zb0007 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(protocol.NumStateProofTypes))
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0007 {
+ if zb0008 {
(*z).Block.BlockHeader.StateProofTracking = nil
} else if (*z).Block.BlockHeader.StateProofTracking == nil {
- (*z).Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0006)
+ (*z).Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0007)
}
- for zb0006 > 0 {
+ for zb0007 > 0 {
var zb0001 protocol.StateProofType
var zb0002 bookkeeping.StateProofTrackingData
- zb0006--
+ zb0007--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
@@ -9946,24 +10374,24 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
}
if zb0004 > 0 {
zb0004--
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
return
}
- if zb0008 > config.MaxProposedExpiredOnlineAccounts {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ if zb0009 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxProposedExpiredOnlineAccounts))
err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
return
}
- if zb0009 {
+ if zb0010 {
(*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
- } else if (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
- (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else if (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0009 {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0009]
} else {
- (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0009)
}
for zb0003 := range (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
bts, err = (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
@@ -9992,13 +10420,13 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
if zb0004 > 0 {
zb0004--
{
- var zb0010 uint64
- zb0010, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0011 uint64
+ zb0011, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OriginalPeriod")
return
}
- (*z).OriginalPeriod = period(zb0010)
+ (*z).OriginalPeriod = period(zb0011)
}
}
if zb0004 > 0 {
@@ -10069,6 +10497,16 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
return
}
case "gen":
+ var zb0012 int
+ zb0012, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "GenesisID")
+ return
+ }
+ if zb0012 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).Block.BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "GenesisID")
@@ -10171,27 +10609,27 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
return
}
case "spt":
- var zb0011 int
- var zb0012 bool
- zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0013 int
+ var zb0014 bool
+ zb0013, zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0011 > protocol.NumStateProofTypes {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(protocol.NumStateProofTypes))
+ if zb0013 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0013), uint64(protocol.NumStateProofTypes))
err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0012 {
+ if zb0014 {
(*z).Block.BlockHeader.StateProofTracking = nil
} else if (*z).Block.BlockHeader.StateProofTracking == nil {
- (*z).Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0011)
+ (*z).Block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData, zb0013)
}
- for zb0011 > 0 {
+ for zb0013 > 0 {
var zb0001 protocol.StateProofType
var zb0002 bookkeeping.StateProofTrackingData
- zb0011--
+ zb0013--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "StateProofTracking")
@@ -10205,24 +10643,24 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
(*z).Block.BlockHeader.StateProofTracking[zb0001] = zb0002
}
case "partupdrmv":
- var zb0013 int
- var zb0014 bool
- zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0015 int
+ var zb0016 bool
+ zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ExpiredParticipationAccounts")
return
}
- if zb0013 > config.MaxProposedExpiredOnlineAccounts {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxProposedExpiredOnlineAccounts))
+ if zb0015 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0015), uint64(config.MaxProposedExpiredOnlineAccounts))
err = msgp.WrapError(err, "ExpiredParticipationAccounts")
return
}
- if zb0014 {
+ if zb0016 {
(*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
- } else if (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0013 {
- (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0013]
+ } else if (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0015 {
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0015]
} else {
- (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0013)
+ (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0015)
}
for zb0003 := range (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
bts, err = (*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
@@ -10245,13 +10683,13 @@ func (z *unauthenticatedProposal) UnmarshalMsg(bts []byte) (o []byte, err error)
}
case "oper":
{
- var zb0015 uint64
- zb0015, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0017 uint64
+ zb0017, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OriginalPeriod")
return
}
- (*z).OriginalPeriod = period(zb0015)
+ (*z).OriginalPeriod = period(zb0017)
}
case "oprop":
bts, err = (*z).OriginalProposer.UnmarshalMsg(bts)
@@ -10300,6 +10738,24 @@ func (z *unauthenticatedProposal) MsgIsZero() bool {
return ((*z).Block.BlockHeader.Round.MsgIsZero()) && ((*z).Block.BlockHeader.Branch.MsgIsZero()) && ((*z).Block.BlockHeader.Seed.MsgIsZero()) && ((*z).Block.BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).Block.BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).Block.BlockHeader.TimeStamp == 0) && ((*z).Block.BlockHeader.GenesisID == "") && ((*z).Block.BlockHeader.GenesisHash.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).Block.BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRate == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).Block.BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).Block.BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).Block.BlockHeader.TxnCounter == 0) && (len((*z).Block.BlockHeader.StateProofTracking) == 0) && (len((*z).Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).Block.Payset.MsgIsZero()) && ((*z).SeedProof.MsgIsZero()) && ((*z).OriginalPeriod == 0) && ((*z).OriginalProposer.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func UnauthenticatedProposalMaxSize() (s int) {
+ s = 3 + 4 + basics.RoundMaxSize() + 5 + bookkeeping.BlockHashMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + msgp.Uint64Size + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.Block.BlockHeader.StateProofTracking
+ s += protocol.NumStateProofTypes * (protocol.StateProofTypeMaxSize())
+ // Adding size of map values for z.Block.BlockHeader.StateProofTracking
+ s += protocol.NumStateProofTypes * (bookkeeping.StateProofTrackingDataMaxSize())
+ s += 11
+ // Calculating size of slice: z.Block.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts
+ s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize()))
+ s += 5
+ // Using maxtotalbytes for: z.Block.Payset
+ s += config.MaxTxnBytesPerBlock
+ s += 5 + crypto.VrfProofMaxSize() + 5 + msgp.Uint64Size + 6 + basics.AddressMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *unauthenticatedVote) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -10452,6 +10908,12 @@ func (z *unauthenticatedVote) MsgIsZero() bool {
return ((*z).R.MsgIsZero()) && ((*z).Cred.MsgIsZero()) && ((*z).Sig.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func UnauthenticatedVoteMaxSize() (s int) {
+ s = 1 + 2 + RawVoteMaxSize() + 5 + committee.UnauthenticatedCredentialMaxSize() + 4 + crypto.OneTimeSignatureMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *vote) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -10604,6 +11066,12 @@ func (z *vote) MsgIsZero() bool {
return ((*z).R.MsgIsZero()) && ((*z).Cred.MsgIsZero()) && ((*z).Sig.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func VoteMaxSize() (s int) {
+ s = 1 + 2 + RawVoteMaxSize() + 5 + committee.CredentialMaxSize() + 4 + crypto.OneTimeSignatureMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *voteAggregator) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -10682,6 +11150,12 @@ func (z *voteAggregator) MsgIsZero() bool {
return true
}
+// MaxSize returns a maximum valid message size for this message type
+func VoteAggregatorMaxSize() (s int) {
+ s = 1
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *voteAuthenticator) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -10822,6 +11296,12 @@ func (z *voteAuthenticator) MsgIsZero() bool {
return ((*z).Sender.MsgIsZero()) && ((*z).Cred.MsgIsZero()) && ((*z).Sig.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func VoteAuthenticatorMaxSize() (s int) {
+ s = 1 + 4 + basics.AddressMaxSize() + 5 + committee.UnauthenticatedCredentialMaxSize() + 4 + crypto.OneTimeSignatureMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *voteTracker) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -11174,6 +11654,21 @@ func (z *voteTracker) MsgIsZero() bool {
return (len((*z).Voters) == 0) && (len((*z).Counts) == 0) && (len((*z).Equivocators) == 0) && ((*z).EquivocatorsCount == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func VoteTrackerMaxSize() (s int) {
+ s = 1 + 7
+ s += msgp.MapHeaderSize
+ panic("Map z.Voters is unbounded")
+ s += 7
+ s += msgp.MapHeaderSize
+ panic("Map z.Counts is unbounded")
+ s += 13
+ s += msgp.MapHeaderSize
+ panic("Map z.Equivocators is unbounded")
+ s += 18 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *voteTrackerContract) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -11310,6 +11805,12 @@ func (z *voteTrackerContract) MsgIsZero() bool {
return ((*z).Step == 0) && ((*z).StepOk == false) && ((*z).Emitted == false)
}
+// MaxSize returns a maximum valid message size for this message type
+func VoteTrackerContractMaxSize() (s int) {
+ s = 1 + 5 + msgp.Uint64Size + 7 + msgp.BoolSize + 8 + msgp.BoolSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *voteTrackerPeriod) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -11538,6 +12039,12 @@ func (z *voteTrackerPeriod) MsgIsZero() bool {
return (((*z).Cached.Bottom == false) && ((*z).Cached.Proposal.MsgIsZero()))
}
+// MaxSize returns a maximum valid message size for this message type
+func VoteTrackerPeriodMaxSize() (s int) {
+ s = 1 + 7 + 1 + 7 + msgp.BoolSize + 9 + ProposalValueMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *voteTrackerRound) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -11648,3 +12155,9 @@ func (z *voteTrackerRound) Msgsize() (s int) {
func (z *voteTrackerRound) MsgIsZero() bool {
return ((*z).Freshest.MsgIsZero()) && ((*z).Ok == false)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func VoteTrackerRoundMaxSize() (s int) {
+ s = 1 + 9 + ThresholdEventMaxSize() + 3 + msgp.BoolSize
+ return
+}
diff --git a/agreement/proposalManager.go b/agreement/proposalManager.go
index ca972b56e..affa17f89 100644
--- a/agreement/proposalManager.go
+++ b/agreement/proposalManager.go
@@ -43,15 +43,15 @@ func (m *proposalManager) underlying() listener {
// A proposalManager handles eight types of events:
//
-// - It applies message relay rules to votePresent, voteVerified,
-// payloadPresent, and payloadVerified events.
+// - It applies message relay rules to votePresent, voteVerified,
+// payloadPresent, and payloadVerified events.
//
// - It enters a new round given a roundInterruption.
//
-// - It enters a new period given a nextThreshold event. It also enters a new
-// period given a softThreshold/certThreshold event, if necessary.
-// - On entering a new period due to a softThreshold/certThreshold, it
-// dispatches this event to the proposalMachineRound.
+// - It enters a new period given a nextThreshold event. It also enters a new
+// period given a softThreshold/certThreshold event, if necessary.
+// - On entering a new period due to a softThreshold/certThreshold, it
+// dispatches this event to the proposalMachineRound.
//
// For more details, see each method's respective documentation below.
func (m *proposalManager) handle(r routerHandle, p player, e event) event {
@@ -99,30 +99,30 @@ func (m *proposalManager) handleNewPeriod(r routerHandle, p player, e thresholdE
// handleMessageEvent is called for {vote,payload}{Present,Verified} events.
//
-// - A votePresent event is delivered when the state machine receives a new
-// proposal-vote. A voteFiltered event is returned if the proposal-vote is
-// not fresh or is a duplicate. Otherwise, an empty event is returned.
+// - A votePresent event is delivered when the state machine receives a new
+// proposal-vote. A voteFiltered event is returned if the proposal-vote is
+// not fresh or is a duplicate. Otherwise, an empty event is returned.
//
-// - A voteVerified event is delievered after verification was attempted on a
-// proposal-vote. A voteMalformed event is returned if the proposal-vote is
-// ill-formed and resulted from a corrupt process. A voteFiltered event is
-// emitted if the vote is not fresh or is a duplicate. Otherwise the
-// proposal-vote is dispatched to the proposalMachineRound, and a voteFiltered
-// or a proposalAccepted event is returned.
+// - A voteVerified event is delivered after verification was attempted on a
+// proposal-vote. A voteMalformed event is returned if the proposal-vote is
+// ill-formed and resulted from a corrupt process. A voteFiltered event is
+// emitted if the vote is not fresh or is a duplicate. Otherwise the
+// proposal-vote is dispatched to the proposalMachineRound, and a voteFiltered
+// or a proposalAccepted event is returned.
//
-// - A payloadPresent event is delivered when the state machine receives a new
-// proposal payload. The payload is dispatched to both the
-// proposalMachineRound for the current round and the proposalMachineRound for
-// the next round. If both state machines return payloadRejected,
-// proposalManager also returns payloadRejected. Otherwise, one state machine
-// returned payloadPipelined, and the proposalManager propagates this event to
-// the parent, setting the event's round properly.
+// - A payloadPresent event is delivered when the state machine receives a new
+// proposal payload. The payload is dispatched to both the
+// proposalMachineRound for the current round and the proposalMachineRound for
+// the next round. If both state machines return payloadRejected,
+// proposalManager also returns payloadRejected. Otherwise, one state machine
+// returned payloadPipelined, and the proposalManager propagates this event to
+// the parent, setting the event's round properly.
//
-// - A payloadVerified event is delivered after validation was attempted on a
-// proposal payload. If the proposal payload was invalid, a payloadMalformed
-// event is returned. Otherwise, the event is dispatched to the
-// proposalMachineRound, and then the resulting payload{Rejected,Accepted} or
-// proposalCommittable event is returned.
+// - A payloadVerified event is delivered after validation was attempted on a
+// proposal payload. If the proposal payload was invalid, a payloadMalformed
+// event is returned. Otherwise, the event is dispatched to the
+// proposalMachineRound, and then the resulting payload{Rejected,Accepted} or
+// proposalCommittable event is returned.
func (m *proposalManager) handleMessageEvent(r routerHandle, p player, e filterableMessageEvent) (res event) {
var pipelinedRound round
var pipelinedPeriod period
diff --git a/agreement/proposalStore.go b/agreement/proposalStore.go
index 1b50fb6f9..080609de5 100644
--- a/agreement/proposalStore.go
+++ b/agreement/proposalStore.go
@@ -150,53 +150,53 @@ func (store *proposalStore) underlying() listener {
// A proposalStore handles six types of events:
//
-// - A voteVerified event is delivered when a relevant proposal-vote has passed
-// cryptographic verification. The proposalStore dispatches the event to the
-// proposalMachinePeriod and returns the resulting event. If the
-// proposalMachinePeriod accepts the event, the set of relevant
-// proposal-values is updated to match the one in the event. If there exists
-// a validated proposal payload matching the proposal-value specified by the
-// proposal-vote, it is attached to the event. The proposalStore is then
-// trimmed. The valid vote is cached as an authenticator.
+// - A voteVerified event is delivered when a relevant proposal-vote has passed
+// cryptographic verification. The proposalStore dispatches the event to the
+// proposalMachinePeriod and returns the resulting event. If the
+// proposalMachinePeriod accepts the event, the set of relevant
+// proposal-values is updated to match the one in the event. If there exists
+// a validated proposal payload matching the proposal-value specified by the
+// proposal-vote, it is attached to the event. The proposalStore is then
+// trimmed. The valid vote is cached as an authenticator.
//
-// - A payloadPresent event is delivered when the state machine receives a
-// proposal payloads. If the payload fails to match any relevant proposal, or
-// if the payload has already been seen by the state machine, payloadRejected
-// is returned. Otherwise, a payloadPipelined event is returned, with a
-// cached proposal-vote possibly set.
+// - A payloadPresent event is delivered when the state machine receives a
+// proposal payload. If the payload fails to match any relevant proposal, or
+// if the payload has already been seen by the state machine, payloadRejected
+// is returned. Otherwise, a payloadPipelined event is returned, with a
+// cached proposal-vote possibly set.
//
-// - A payloadVerified event is delivered when a relevant proposal payload has
-// passed cryptographic verification. If the payload fails to match any
-// relevant proposal, or if the payload has already been seen by the state
-// machine, payloadRejected is returned. Otherwise, either a
-// proposalCommittable event or a payloadAccepted event is returned, depending
-// on whether the proposal matches the current staging value. This returned
-// event may have a cached authenticator set.
+// - A payloadVerified event is delivered when a relevant proposal payload has
+// passed cryptographic verification. If the payload fails to match any
+// relevant proposal, or if the payload has already been seen by the state
+// machine, payloadRejected is returned. Otherwise, either a
+// proposalCommittable event or a payloadAccepted event is returned, depending
+// on whether the proposal matches the current staging value. This returned
+// event may have a cached authenticator set.
//
-// - A newPeriod event is delivered when the player state machine enters a new
-// period. When this happens, the proposalStore updates Pinned, cleans up old
-// state, and then returns an empty event.
+// - A newPeriod event is delivered when the player state machine enters a new
+// period. When this happens, the proposalStore updates Pinned, cleans up old
+// state, and then returns an empty event.
//
-// - A newRound event is delivered when the player state machine enters a new
-// round. When this happens, the proposalStore returns a payloadPipelined
-// event with the proposal payload for the proposal-vote with the lowest
-// credential it has seen and possibly a cached authenticator (if not, it
-// returns an empty event).
+// - A newRound event is delivered when the player state machine enters a new
+// round. When this happens, the proposalStore returns a payloadPipelined
+// event with the proposal payload for the proposal-vote with the lowest
+// credential it has seen and possibly a cached authenticator (if not, it
+// returns an empty event).
//
-// - A soft/certThreshold event is delivered when the player state has observed a
-// quorum of soft/cert votes for the current round and period. The proposalStore
-// dispatches this event to the proposalMachinePeriod. If the proposalStore
-// has the proposal payload corresponding to the proposal-value of the quorum,
-// it returns a proposalCommittable event; otherwise, it propagates the
-// proposalAccepted event.
+// - A soft/certThreshold event is delivered when the player state has observed a
+// quorum of soft/cert votes for the current round and period. The proposalStore
+// dispatches this event to the proposalMachinePeriod. If the proposalStore
+// has the proposal payload corresponding to the proposal-value of the quorum,
+// it returns a proposalCommittable event; otherwise, it propagates the
+// proposalAccepted event.
//
-// - A readStaging event is dispatched to the proposalMachinePeriod. The proposalStore
-// sets the matching proposal payload (if one exists) in the response.
+// - A readStaging event is dispatched to the proposalMachinePeriod. The proposalStore
+// sets the matching proposal payload (if one exists) in the response.
//
-// - A readPinned event is delivered when the player wants to query the current
-// pinned proposalValue, and corresponding payload if one exists. This occurs
-// during resynchronization when players may relay the pinned value.
-// The event is handled exclusively by the proposalStore and not forwarded.
+// - A readPinned event is delivered when the player wants to query the current
+// pinned proposalValue, and corresponding payload if one exists. This occurs
+// during resynchronization when players may relay the pinned value.
+// The event is handled exclusively by the proposalStore and not forwarded.
func (store *proposalStore) handle(r routerHandle, p player, e event) event {
if store.Relevant == nil {
store.Relevant = make(map[period]proposalValue)
diff --git a/agreement/proposalTracker.go b/agreement/proposalTracker.go
index 4539a1f51..59ffb77a2 100644
--- a/agreement/proposalTracker.go
+++ b/agreement/proposalTracker.go
@@ -90,34 +90,34 @@ func (t *proposalTracker) underlying() listener {
// A proposalTracker handles five types of events.
//
-// - voteFilterRequest returns a voteFiltered event if a given proposal-vote
-// from a given sender has already been seen. Otherwise it returns an empty
-// event.
+// - voteFilterRequest returns a voteFiltered event if a given proposal-vote
+// from a given sender has already been seen. Otherwise it returns an empty
+// event.
//
-// - voteVerified is issued when a relevant proposal-vote has passed
-// cryptographic verification. If the proposalTracker has already seen a
-// proposal-vote from the same sender, a voteFiltered event is returned. If
-// the proposal-vote's credential is not lowest than the current lowest
-// credential, or if a proposalFrozen or softThreshold event has already been delivered,
-// voteFiltered is also returned. Otherwise, a proposalAccepted event is
-// returned. The returned event contains the proposal-value relevant to the
-// current period.
+// - voteVerified is issued when a relevant proposal-vote has passed
+// cryptographic verification. If the proposalTracker has already seen a
+// proposal-vote from the same sender, a voteFiltered event is returned. If
+// the proposal-vote's credential is not lower than the current lowest
+// credential, or if a proposalFrozen or softThreshold event has already been delivered,
+// voteFiltered is also returned. Otherwise, a proposalAccepted event is
+// returned. The returned event contains the proposal-value relevant to the
+// current period.
//
-// - proposalFrozen is issued after the state machine has timed out waiting for
-// the vote with the lowest credential value and has settled on a value to
-// soft-vote. A proposalFrozen event tells this state machine to stop
-// accepting new proposal-votes. The proposalFrozen is returned and the best
-// vote proposal-value is returned. If none exists, bottom is returned.
+// - proposalFrozen is issued after the state machine has timed out waiting for
+// the vote with the lowest credential value and has settled on a value to
+// soft-vote. A proposalFrozen event tells this state machine to stop
+// accepting new proposal-votes. The proposalFrozen is returned and the best
+// vote proposal-value is returned. If none exists, bottom is returned.
//
-// - softThreshold is issued after the state machine has received a threshold of
-// soft votes for some value in the proposalTracker's period. The
-// softThreshold event sets the proposalTracker's staging value. A
-// proposalAccepted event is returned, which contains the proposal-value
-// relevant to the current period.
+// - softThreshold is issued after the state machine has received a threshold of
+// soft votes for some value in the proposalTracker's period. The
+// softThreshold event sets the proposalTracker's staging value. A
+// proposalAccepted event is returned, which contains the proposal-value
+// relevant to the current period.
//
-// - readStaging returns the a stagingValueEvent with the proposal-value
-// believed to be the staging value (i.e., sigma(S, r, p)) by the
-// proposalTracker in period p.
+// - readStaging returns a stagingValueEvent with the proposal-value
+// believed to be the staging value (i.e., sigma(S, r, p)) by the
+// proposalTracker in period p.
func (t *proposalTracker) handle(r routerHandle, p player, e event) event {
switch e.t() {
case voteFilterRequest:
diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go
index 3ebb681ca..78f6674d7 100644
--- a/agreement/pseudonode.go
+++ b/agreement/pseudonode.go
@@ -295,17 +295,17 @@ func (n asyncPseudonode) makeProposals(round basics.Round, period period, accoun
votes := make([]unauthenticatedVote, 0, len(accounts))
proposals := make([]proposal, 0, len(accounts))
for _, acc := range accounts {
- payload, proposal, err := proposalForBlock(acc.Account, acc.VRF, ve, period, n.ledger)
- if err != nil {
- n.log.Errorf("pseudonode.makeProposals: could not create proposal for block (address %v): %v", acc.Account, err)
+ payload, proposal, pErr := proposalForBlock(acc.Account, acc.VRF, ve, period, n.ledger)
+ if pErr != nil {
+ n.log.Errorf("pseudonode.makeProposals: could not create proposal for block (address %v): %v", acc.Account, pErr)
continue
}
// attempt to make the vote
rv := rawVote{Sender: acc.Account, Round: round, Period: period, Step: propose, Proposal: proposal}
- uv, err := makeVote(rv, acc.VotingSigner(), acc.VRF, n.ledger)
- if err != nil {
- n.log.Warnf("pseudonode.makeProposals: could not create vote: %v", err)
+ uv, vErr := makeVote(rv, acc.VotingSigner(), acc.VRF, n.ledger)
+ if vErr != nil {
+ n.log.Warnf("pseudonode.makeProposals: could not create vote: %v", vErr)
continue
}
diff --git a/agreement/router.go b/agreement/router.go
index 040fd525e..32523ee8e 100644
--- a/agreement/router.go
+++ b/agreement/router.go
@@ -19,6 +19,7 @@ package agreement
// A stateMachineTag uniquely identifies the type of a state machine.
//
// Rounds, periods, and steps may be used to further identify different state machine instances of the same type.
+//
//msgp:ignore stateMachineTag
type stateMachineTag int
diff --git a/agreement/sort.go b/agreement/sort.go
index f95060ca8..12a078074 100644
--- a/agreement/sort.go
+++ b/agreement/sort.go
@@ -25,6 +25,7 @@ import (
// These types are defined to satisfy SortInterface used by
// SortAddress is re-exported from basics.Address since the interface is already defined there
+//
//msgp:sort basics.Address SortAddress
type SortAddress = basics.SortAddress
@@ -33,6 +34,7 @@ type SortAddress = basics.SortAddress
type SortUint64 = basics.SortUint64
// SortStep defines SortInterface used by msgp to consistently sort maps with this type as key.
+//
//msgp:ignore SortStep
//msgp:sort step SortStep
type SortStep []step
@@ -42,6 +44,7 @@ func (a SortStep) Less(i, j int) bool { return a[i] < a[j] }
func (a SortStep) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// SortPeriod defines SortInterface used by msgp to consistently sort maps with this type as key.
+//
//msgp:ignore SortPeriod
//msgp:sort period SortPeriod
type SortPeriod []period
@@ -51,15 +54,18 @@ func (a SortPeriod) Less(i, j int) bool { return a[i] < a[j] }
func (a SortPeriod) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// SortRound defines SortInterface used by msgp to consistently sort maps with this type as key.
+// note, for type aliases the base type is used for the interface
+//
//msgp:ignore SortRound
-//msgp:sort round SortRound
-type SortRound []round
+//msgp:sort basics.Round SortRound
+type SortRound []basics.Round
func (a SortRound) Len() int { return len(a) }
func (a SortRound) Less(i, j int) bool { return a[i] < a[j] }
func (a SortRound) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// SortProposalValue defines SortInterface used by msgp to consistently sort maps with this type as key.
+//
//msgp:ignore SortProposalValue
//msgp:sort proposalValue SortProposalValue
type SortProposalValue []proposalValue
diff --git a/agreement/voteAggregator.go b/agreement/voteAggregator.go
index 29d5e2b54..b031ac9fa 100644
--- a/agreement/voteAggregator.go
+++ b/agreement/voteAggregator.go
@@ -43,32 +43,38 @@ func (agg *voteAggregator) underlying() listener {
// A voteAggregator handles four types of events:
//
-// - votePresent is issued when a new vote arrives at the state machine. A
-// voteFiltered event is emitted in response if the vote is either stale or
-// an equivocating duplicate. Otherwise an empty event is returned.
+// - votePresent is issued when a new vote arrives at the state machine. A
+// voteFiltered event is emitted in response if the vote is either stale or
+// an equivocating duplicate. Otherwise an empty event is returned.
//
-// - voteVerified is issued after the agreement service has attempted
-// cryptographic verification on a given vote.
-// - A voteMalformed event is emitted if the ill-formed vote was the result
-// of some corrupt process.
-// - A voteFiltered event is emitted if the vote is either stale or an
-// equivocating duplicate.
-// - Otherwise, the vote is observed. thresholdEvents occur in the current
-// round are propagated up to the parent, while thresholdEvents that occur
-// the next round are pipelined for the next round.
+// - voteVerified is issued after the agreement service has attempted
+// cryptographic verification on a given vote.
//
-// - bundlePresent is issued when a new bundle arrives at the state machine. A
-// bundleFiltered event is emitted in response if the bundle is stale.
-// Otherwise an empty event is returned.
+// - A voteMalformed event is emitted if the ill-formed vote was the result
+// of some corrupt process.
//
-// - bundleVerified is issued after agreement service has attempted
-// cryptographic verification on a given bundle.
-// - A bundleMalformed event is emitted if the ill-formed bundle was the
-// result of some corrupt process.
-// - A bundleFiltered event is emitted if the bundle is stale.
-// - Otherwise, the bundle is observed. If observing the bundle causes a
-// thresholdEvent to occur, the thresholdEvent is propagated to the
-// parent. Otherwise, a bundleFiltered event is propagated to the parent.
+// - A voteFiltered event is emitted if the vote is either stale or an
+// equivocating duplicate.
+//
+// - Otherwise, the vote is observed. thresholdEvents occur in the current
+// round are propagated up to the parent, while thresholdEvents that occur
+// the next round are pipelined for the next round.
+//
+// - bundlePresent is issued when a new bundle arrives at the state machine. A
+// bundleFiltered event is emitted in response if the bundle is stale.
+// Otherwise an empty event is returned.
+//
+// - bundleVerified is issued after agreement service has attempted
+// cryptographic verification on a given bundle.
+//
+// - A bundleMalformed event is emitted if the ill-formed bundle was the
+// result of some corrupt process.
+//
+// - A bundleFiltered event is emitted if the bundle is stale.
+//
+// - Otherwise, the bundle is observed. If observing the bundle causes a
+// thresholdEvent to occur, the thresholdEvent is propagated to the
+// parent. Otherwise, a bundleFiltered event is propagated to the parent.
func (agg *voteAggregator) handle(r routerHandle, pr player, em event) (res event) {
e := em.(filterableMessageEvent)
defer func() {
diff --git a/agreement/voteAuxiliary.go b/agreement/voteAuxiliary.go
index d560c4f01..fa8bfd36b 100644
--- a/agreement/voteAuxiliary.go
+++ b/agreement/voteAuxiliary.go
@@ -89,11 +89,12 @@ func (t *voteTrackerPeriod) handle(r routerHandle, p player, e event) event {
//
// Bundle "freshness" is an ordering relation defined on thresholdEvents. The
// relation is defined as follows:
-// - certThresholds are fresher than other kinds of thresholdEvent.
-// - other thresholdEvents are fresher than thresholdEvents from older periods.
-// - nextThresholds are fresher than softThreshold in the same period.
-// - nextThresholds for the bottom proposal-value are fresher than
-// nextThresholds for another proposal-value.
+// - certThresholds are fresher than other kinds of thresholdEvent.
+// - other thresholdEvents are fresher than thresholdEvents from older periods.
+// - nextThresholds are fresher than softThreshold in the same period.
+// - nextThresholds for the bottom proposal-value are fresher than
+// nextThresholds for another proposal-value.
+//
// (Note that the step of a nextThreshold does not affect its freshness.)
//
// It handles the following type(s) of event: voteAcceptedEvent, freshestBundleRequest, nextThreshold
diff --git a/agreement/voteTrackerContract.go b/agreement/voteTrackerContract.go
index 095523097..31470caa4 100644
--- a/agreement/voteTrackerContract.go
+++ b/agreement/voteTrackerContract.go
@@ -23,24 +23,25 @@ import (
// A voteMachine should track a new vote.
//
// Preconditions:
-// - e.T = voteAccepted or voteFilterRequest
-// - v.R.Step != propose
-// - for all votes v = e.Vote, v.R.Step is the same
-// - (Algorand safety assumptions on the equivocation of votes)
+// - e.T = voteAccepted or voteFilterRequest
+// - v.R.Step != propose
+// - for all votes v = e.Vote, v.R.Step is the same
+// - (Algorand safety assumptions on the equivocation of votes)
//
// Postconditions (let e be the returned event):
-// if Input is of type voteAccepted:
-// - e.T is one of {none, {soft,cert,next}Threshold}
-// - e.T corresponds to the input event's step (e.g. if the input event had v.R.Step = soft, then e.T is either none or softThreshold)
-// - e.T != none if and only if e.Bundle contains a valid bundle for e.Proposal
-// - if e.T is a {soft,cert}Threshold event, it will be emitted at most once and e.Proposal != bottom
-// - if e.T is a {next}Threshold event, it will be emitted at most once and e.Proposal != bottom
-// if Input is of type voteFilterRequest:
-// - e.T is one of {none, voteFilteredStep}
-// - e.T = none for a given input only once (the first time the vote is seen, if we have not previously detected equivocation
+//
+// if Input is of type voteAccepted:
+// - e.T is one of {none, {soft,cert,next}Threshold}
+// - e.T corresponds to the input event's step (e.g. if the input event had v.R.Step = soft, then e.T is either none or softThreshold)
+// - e.T != none if and only if e.Bundle contains a valid bundle for e.Proposal
+// - if e.T is a {soft,cert}Threshold event, it will be emitted at most once and e.Proposal != bottom
+// - if e.T is a {next}Threshold event, it will be emitted at most once and e.Proposal != bottom
+// if Input is of type voteFilterRequest:
+// - e.T is one of {none, voteFilteredStep}
+// - e.T = none for a given input only once (the first time the vote is seen, if we have not previously detected equivocation)
//
// Trace properties
-// - voteFilterRequest is idempotent
+// - voteFilterRequest is idempotent
type voteTrackerContract struct {
_struct struct{} `codec:","`
diff --git a/buildnumber.dat b/buildnumber.dat
index 00750edc0..573541ac9 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-3
+0
diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go
index 720f25790..801901048 100644
--- a/catchup/catchpointService.go
+++ b/catchup/catchpointService.go
@@ -156,11 +156,19 @@ func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNo
}
// Start starts the catchpoint catchup service ( continue in the process )
-func (cs *CatchpointCatchupService) Start(ctx context.Context) {
+func (cs *CatchpointCatchupService) Start(ctx context.Context) error {
+ // Only check catchpoint ledger validity if we're starting new
+ if cs.stage == ledger.CatchpointCatchupStateInactive {
+ err := cs.checkLedgerDownload()
+ if err != nil {
+ return fmt.Errorf("aborting catchup Start(): %s", err)
+ }
+ }
cs.ctx, cs.cancelCtxFunc = context.WithCancel(ctx)
cs.abortCtx, cs.abortCtxFunc = context.WithCancel(context.Background())
cs.running.Add(1)
go cs.run()
+ return nil
}
// Abort aborts the catchpoint catchup process
@@ -803,3 +811,26 @@ func (cs *CatchpointCatchupService) initDownloadPeerSelector() {
})
}
}
+
+// checkLedgerDownload sends a HEAD request to the ledger endpoint of peers to validate the catchpoint's availability
+// before actually starting the catchup process.
+// The error returned is either from an unsuccessful request or a successful request that did not return a 200.
+func (cs *CatchpointCatchupService) checkLedgerDownload() error {
+	round, _, err := ledgercore.ParseCatchpointLabel(cs.stats.CatchpointLabel)
+	if err != nil {
+		return fmt.Errorf("failed to parse catchpoint label : %v", err)
+	}
+	peerSelector := makePeerSelector(cs.net, []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookRelays}})
+	ledgerFetcher := makeLedgerFetcher(cs.net, cs.ledgerAccessor, cs.log, cs, cs.config)
+	for i := 0; i < cs.config.CatchupLedgerDownloadRetryAttempts; i++ {
+		psp, peerError := peerSelector.getNextPeer()
+		if peerError != nil {
+			return peerError // not err: err is nil here after a successful label parse, which would falsely signal success
+		}
+		err = ledgerFetcher.headLedger(context.Background(), psp.Peer, round)
+		if err == nil {
+			return nil
+		}
+	}
+	return fmt.Errorf("checkLedgerDownload(): catchpoint '%s' unavailable from peers: %s", cs.stats.CatchpointLabel, err)
+}
diff --git a/catchup/catchpointService_test.go b/catchup/catchpointService_test.go
index de91b456e..48cea110d 100644
--- a/catchup/catchpointService_test.go
+++ b/catchup/catchpointService_test.go
@@ -27,6 +27,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -76,6 +77,11 @@ func (m *catchpointCatchupAccessorMock) Ledger() (l ledger.CatchupAccessorClient
return m.l
}
+// GetVerifyData returns the balances hash, spver hash and totals used by VerifyCatchpoint
+func (m *catchpointCatchupAccessorMock) GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error) {
+ return crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, nil
+}
+
// TestCatchpointServicePeerRank ensures CatchpointService does not crash when a block fetched
// from the local ledger and not from network when ranking a peer
func TestCatchpointServicePeerRank(t *testing.T) {
diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go
index df762bc43..e24286e8d 100644
--- a/catchup/fetcher_test.go
+++ b/catchup/fetcher_test.go
@@ -268,7 +268,7 @@ func (p *testUnicastPeer) Request(ctx context.Context, tag protocol.Tag, topics
}
}
-func (p *testUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMessage, responseTopics network.Topics) (e error) {
+func (p *testUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMessage, outMsg network.OutgoingMessage) (e error) {
hashKey := uint64(0)
channel, found := p.responseChannels[hashKey]
@@ -276,7 +276,7 @@ func (p *testUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMe
}
select {
- case channel <- &network.Response{Topics: responseTopics}:
+ case channel <- &network.Response{Topics: outMsg.Topics}:
default:
}
diff --git a/catchup/ledgerFetcher.go b/catchup/ledgerFetcher.go
index bf79c5d3b..43a039a09 100644
--- a/catchup/ledgerFetcher.go
+++ b/catchup/ledgerFetcher.go
@@ -73,35 +73,63 @@ func makeLedgerFetcher(net network.GossipNode, accessor ledger.CatchpointCatchup
}
}
-func (lf *ledgerFetcher) downloadLedger(ctx context.Context, peer network.Peer, round basics.Round) error {
- httpPeer, ok := peer.(network.HTTPPeer)
- if !ok {
- return errNonHTTPPeer
- }
- return lf.getPeerLedger(ctx, httpPeer, round)
-}
-
-func (lf *ledgerFetcher) getPeerLedger(ctx context.Context, peer network.HTTPPeer, round basics.Round) error {
+func (lf *ledgerFetcher) requestLedger(ctx context.Context, peer network.HTTPPeer, round basics.Round, method string) (*http.Response, error) {
parsedURL, err := network.ParseHostOrURL(peer.GetAddress())
if err != nil {
- return err
+ return nil, err
}
parsedURL.Path = lf.net.SubstituteGenesisID(path.Join(parsedURL.Path, "/v1/{genesisID}/ledger/"+strconv.FormatUint(uint64(round), 36)))
ledgerURL := parsedURL.String()
- lf.log.Debugf("ledger GET %#v peer %#v %T", ledgerURL, peer, peer)
- request, err := http.NewRequest(http.MethodGet, ledgerURL, nil)
+ lf.log.Debugf("ledger %s %#v peer %#v %T", method, ledgerURL, peer, peer)
+ request, err := http.NewRequestWithContext(ctx, method, ledgerURL, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ network.SetUserAgentHeader(request.Header)
+ return peer.GetHTTPClient().Do(request)
+}
+
+func (lf *ledgerFetcher) headLedger(ctx context.Context, peer network.Peer, round basics.Round) error {
+	httpPeer, ok := peer.(network.HTTPPeer)
+	if !ok {
+		return errNonHTTPPeer
+	}
+	timeoutContext, timeoutContextCancel := context.WithTimeout(ctx, lf.config.MaxCatchpointDownloadDuration)
+	defer timeoutContextCancel()
+	response, err := lf.requestLedger(timeoutContext, httpPeer, round, http.MethodHead)
 	if err != nil {
+		lf.log.Debugf("headLedger HEAD : %s", err)
 		return err
 	}
+	defer func() { _ = response.Body.Close() }()
+
+	// check to see that we had no errors.
+	switch response.StatusCode {
+	case http.StatusOK:
+		return nil
+	case http.StatusNotFound: // server could not find a block with that round number.
+		return errNoLedgerForRound
+	default:
+		return fmt.Errorf("headLedger error response status code %d", response.StatusCode)
+	}
+}
+
+func (lf *ledgerFetcher) downloadLedger(ctx context.Context, peer network.Peer, round basics.Round) error {
+ httpPeer, ok := peer.(network.HTTPPeer)
+ if !ok {
+ return errNonHTTPPeer
+ }
+ return lf.getPeerLedger(ctx, httpPeer, round)
+}
+func (lf *ledgerFetcher) getPeerLedger(ctx context.Context, peer network.HTTPPeer, round basics.Round) error {
timeoutContext, timeoutContextCancel := context.WithTimeout(ctx, lf.config.MaxCatchpointDownloadDuration)
defer timeoutContextCancel()
- request = request.WithContext(timeoutContext)
- network.SetUserAgentHeader(request.Header)
- response, err := peer.GetHTTPClient().Do(request)
+ response, err := lf.requestLedger(timeoutContext, peer, round, http.MethodGet)
if err != nil {
- lf.log.Debugf("getPeerLedger GET %v : %s", ledgerURL, err)
+ lf.log.Debugf("getPeerLedger GET : %s", err)
return err
}
defer response.Body.Close()
diff --git a/catchup/ledgerFetcher_test.go b/catchup/ledgerFetcher_test.go
index 248192c4f..f5039e991 100644
--- a/catchup/ledgerFetcher_test.go
+++ b/catchup/ledgerFetcher_test.go
@@ -59,6 +59,74 @@ func TestNonParsableAddress(t *testing.T) {
func TestLedgerFetcherErrorResponseHandling(t *testing.T) {
partitiontest.PartitionTest(t)
+ testcases := []struct {
+ name string
+ httpServerResponse int
+ contentTypes []string
+ err error
+ }{
+ {
+ name: "getPeerLedger 404 Response",
+ httpServerResponse: http.StatusNotFound,
+ contentTypes: make([]string, 0),
+ err: errNoLedgerForRound,
+ },
+ {
+ name: "getPeerLedger 500 Response",
+ httpServerResponse: http.StatusInternalServerError,
+ contentTypes: make([]string, 0),
+ err: fmt.Errorf("getPeerLedger error response status code %d", http.StatusInternalServerError),
+ },
+ {
+ name: "getPeerLedger No Content Type",
+ httpServerResponse: http.StatusOK,
+ contentTypes: make([]string, 0),
+ err: fmt.Errorf("getPeerLedger : http ledger fetcher invalid content type count %d", 0),
+ },
+ {
+ name: "getPeerLedger Too Many Content Types",
+ httpServerResponse: http.StatusOK,
+ contentTypes: []string{"applications/one", "applications/two"},
+ err: fmt.Errorf("getPeerLedger : http ledger fetcher invalid content type count %d", 2),
+ },
+ {
+ name: "getPeerLedger Invalid Content Type",
+ httpServerResponse: http.StatusOK,
+ contentTypes: []string{"applications/one"},
+ err: fmt.Errorf("getPeerLedger : http ledger fetcher response has an invalid content type : %s", "applications/one"),
+ },
+ }
+ for _, tc := range testcases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ // create a dummy server.
+ mux := http.NewServeMux()
+ s := &http.Server{
+ Handler: mux,
+ }
+ listener, err := net.Listen("tcp", "localhost:")
+
+ require.NoError(t, err)
+ go s.Serve(listener)
+ defer s.Close()
+ defer listener.Close()
+ mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+ for _, contentType := range tc.contentTypes {
+ w.Header().Add("Content-Type", contentType)
+ }
+ w.WriteHeader(tc.httpServerResponse)
+ })
+ lf := makeLedgerFetcher(&mocks.MockNetwork{}, &mocks.MockCatchpointCatchupAccessor{}, logging.TestingLog(t), &dummyLedgerFetcherReporter{}, config.GetDefaultLocal())
+ peer := testHTTPPeer(listener.Addr().String())
+ err = lf.getPeerLedger(context.Background(), &peer, basics.Round(0))
+ require.Equal(t, tc.err, err)
+ })
+ }
+}
+
+func TestLedgerFetcherHeadLedger(t *testing.T) {
+ partitiontest.PartitionTest(t)
// create a dummy server.
mux := http.NewServeMux()
@@ -67,38 +135,42 @@ func TestLedgerFetcherErrorResponseHandling(t *testing.T) {
}
listener, err := net.Listen("tcp", "localhost:")
+ var httpServerResponse = 0
+ var contentTypes = make([]string, 0)
require.NoError(t, err)
go s.Serve(listener)
defer s.Close()
defer listener.Close()
-
- httpServerResponse := http.StatusNotFound
- contentTypes := make([]string, 0)
mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
for _, contentType := range contentTypes {
w.Header().Add("Content-Type", contentType)
}
w.WriteHeader(httpServerResponse)
})
-
+ successPeer := testHTTPPeer(listener.Addr().String())
lf := makeLedgerFetcher(&mocks.MockNetwork{}, &mocks.MockCatchpointCatchupAccessor{}, logging.TestingLog(t), &dummyLedgerFetcherReporter{}, config.GetDefaultLocal())
- peer := testHTTPPeer(listener.Addr().String())
- err = lf.getPeerLedger(context.Background(), &peer, basics.Round(0))
- require.Equal(t, errNoLedgerForRound, err)
- httpServerResponse = http.StatusInternalServerError
- err = lf.getPeerLedger(context.Background(), &peer, basics.Round(0))
- require.Equal(t, fmt.Errorf("getPeerLedger error response status code %d", httpServerResponse), err)
+ // headLedger non-http peer
+ err = lf.headLedger(context.Background(), nil, basics.Round(0))
+ require.Equal(t, errNonHTTPPeer, err)
- httpServerResponse = http.StatusOK
- err = lf.getPeerLedger(context.Background(), &peer, basics.Round(0))
- require.Equal(t, fmt.Errorf("getPeerLedger : http ledger fetcher invalid content type count %d", 0), err)
+ // headLedger parseURL failure
+ parseFailurePeer := testHTTPPeer("foobar")
+ err = lf.headLedger(context.Background(), &parseFailurePeer, basics.Round(0))
+ require.Equal(t, fmt.Errorf("could not parse a host from url"), err)
+
+ // headLedger 404 response
+ httpServerResponse = http.StatusNotFound
+ err = lf.headLedger(context.Background(), &successPeer, basics.Round(0))
+ require.Equal(t, errNoLedgerForRound, err)
- contentTypes = []string{"applications/one", "applications/two"}
- err = lf.getPeerLedger(context.Background(), &peer, basics.Round(0))
- require.Equal(t, fmt.Errorf("getPeerLedger : http ledger fetcher invalid content type count %d", len(contentTypes)), err)
+ // headLedger 200 response
+ httpServerResponse = http.StatusOK
+ err = lf.headLedger(context.Background(), &successPeer, basics.Round(0))
+ require.NoError(t, err)
- contentTypes = []string{"applications/one"}
- err = lf.getPeerLedger(context.Background(), &peer, basics.Round(0))
- require.Equal(t, fmt.Errorf("getPeerLedger : http ledger fetcher response has an invalid content type : %s", contentTypes[0]), err)
+ // headLedger 500 response
+ httpServerResponse = http.StatusInternalServerError
+ err = lf.headLedger(context.Background(), &successPeer, basics.Round(0))
+ require.Equal(t, fmt.Errorf("headLedger error response status code %d", http.StatusInternalServerError), err)
}
diff --git a/catchup/peerSelector.go b/catchup/peerSelector.go
index b78d802fa..a75e7c61f 100644
--- a/catchup/peerSelector.go
+++ b/catchup/peerSelector.go
@@ -47,12 +47,16 @@ const (
peerRank3LowBlockTime = 601
peerRank3HighBlockTime = 799
+ peerRankInitialFifthPriority = 800
+ peerRank4LowBlockTime = 801
+ peerRank4HighBlockTime = 999
+
// peerRankDownloadFailed is used for responses which could be temporary, such as missing files, or such that we don't
// have clear resolution
- peerRankDownloadFailed = 900
+ peerRankDownloadFailed = 10000
// peerRankInvalidDownload is used for responses which are likely to be invalid - whether it's serving the wrong content
// or attempting to serve malicious content
- peerRankInvalidDownload = 1000
+ peerRankInvalidDownload = 12000
// once a block is downloaded, the download duration is clamped into the range of [lowBlockDownloadThreshold..highBlockDownloadThreshold] and
+ // then mapped into a ranking range.
@@ -383,8 +387,10 @@ func (ps *peerSelector) peerDownloadDurationToRank(psp *peerSelectorPeer, blockD
return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank1LowBlockTime, peerRank1HighBlockTime)
case peerRankInitialThirdPriority:
return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank2LowBlockTime, peerRank2HighBlockTime)
- default: // i.e. peerRankInitialFourthPriority
+ case peerRankInitialFourthPriority:
return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank3LowBlockTime, peerRank3HighBlockTime)
+ default: // i.e. peerRankInitialFifthPriority
+ return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank4LowBlockTime, peerRank4HighBlockTime)
}
}
@@ -520,8 +526,10 @@ func lowerBound(class peerClass) int {
return peerRank1LowBlockTime
case peerRankInitialThirdPriority:
return peerRank2LowBlockTime
- default: // i.e. peerRankInitialFourthPriority
+ case peerRankInitialFourthPriority:
return peerRank3LowBlockTime
+ default: // i.e. peerRankInitialFifthPriority
+ return peerRank4LowBlockTime
}
}
@@ -533,8 +541,10 @@ func upperBound(class peerClass) int {
return peerRank1HighBlockTime
case peerRankInitialThirdPriority:
return peerRank2HighBlockTime
- default: // i.e. peerRankInitialFourthPriority
+ case peerRankInitialFourthPriority:
return peerRank3HighBlockTime
+ default: // i.e. peerRankInitialFifthPriority
+ return peerRank4HighBlockTime
}
}
diff --git a/catchup/peerSelector_test.go b/catchup/peerSelector_test.go
index 8e66b9ab8..4991143ef 100644
--- a/catchup/peerSelector_test.go
+++ b/catchup/peerSelector_test.go
@@ -59,7 +59,7 @@ func (d *mockUnicastPeer) Version() string {
func (d *mockUnicastPeer) Request(ctx context.Context, tag network.Tag, topics network.Topics) (resp *network.Response, e error) {
return nil, nil
}
-func (d *mockUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMessage, topics network.Topics) (e error) {
+func (d *mockUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMessage, outMsg network.OutgoingMessage) (e error) {
return nil
}
@@ -381,12 +381,12 @@ func TestPeersDownloadFailed(t *testing.T) {
if len(peerSelector.pools) == 2 {
b1orb2 := peerAddress(peerSelector.pools[0].peers[1].peer) == "b1" || peerAddress(peerSelector.pools[0].peers[1].peer) == "b2"
require.True(t, b1orb2)
- require.Equal(t, peerSelector.pools[1].rank, 900)
+ require.Equal(t, peerSelector.pools[1].rank, peerRankDownloadFailed)
require.Equal(t, len(peerSelector.pools[1].peers), 3)
} else { // len(pools) == 3
b1orb2 := peerAddress(peerSelector.pools[1].peers[0].peer) == "b1" || peerAddress(peerSelector.pools[1].peers[0].peer) == "b2"
require.True(t, b1orb2)
- require.Equal(t, peerSelector.pools[2].rank, 900)
+ require.Equal(t, peerSelector.pools[2].rank, peerRankDownloadFailed)
require.Equal(t, len(peerSelector.pools[2].peers), 3)
}
@@ -459,6 +459,7 @@ func TestPeerDownloadDurationToRank(t *testing.T) {
peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}}
peers3 := []network.Peer{&mockHTTPPeer{address: "c1"}, &mockHTTPPeer{address: "c2"}}
peers4 := []network.Peer{&mockHTTPPeer{address: "d1"}, &mockHTTPPeer{address: "b2"}}
+ peers5 := []network.Peer{&mockHTTPPeer{address: "e1"}, &mockHTTPPeer{address: "b2"}}
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
@@ -469,15 +470,18 @@ func TestPeerDownloadDurationToRank(t *testing.T) {
peers = append(peers, peers2...)
} else if opt == network.PeersConnectedOut {
peers = append(peers, peers3...)
- } else {
+ } else if opt == network.PeersPhonebookArchivalNodes {
peers = append(peers, peers4...)
+ } else { // PeersConnectedIn
+ peers = append(peers, peers5...)
}
}
return
}), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn}},
+ {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialFifthPriority, peerClass: network.PeersConnectedIn}},
)
_, err := peerSelector.getNextPeer()
@@ -490,7 +494,10 @@ func TestPeerDownloadDurationToRank(t *testing.T) {
require.Equal(t, downloadDurationToRank(500*time.Millisecond, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank2LowBlockTime, peerRank2HighBlockTime),
peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers3[0], network.PeersConnectedOut}, 500*time.Millisecond))
require.Equal(t, downloadDurationToRank(500*time.Millisecond, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank3LowBlockTime, peerRank3HighBlockTime),
- peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers4[0], network.PeersConnectedIn}, 500*time.Millisecond))
+ peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers4[0], network.PeersPhonebookArchivalNodes}, 500*time.Millisecond))
+ require.Equal(t, downloadDurationToRank(500*time.Millisecond, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank4LowBlockTime, peerRank4HighBlockTime),
+ peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers5[0], network.PeersConnectedIn}, 500*time.Millisecond))
+
}
func TestLowerUpperBounds(t *testing.T) {
@@ -499,23 +506,26 @@ func TestLowerUpperBounds(t *testing.T) {
classes := []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn}}
+ {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn},
+ {initialRank: peerRankInitialFifthPriority, peerClass: network.PeersConnectedIn}}
require.Equal(t, peerRank0LowBlockTime, lowerBound(classes[0]))
require.Equal(t, peerRank1LowBlockTime, lowerBound(classes[1]))
require.Equal(t, peerRank2LowBlockTime, lowerBound(classes[2]))
require.Equal(t, peerRank3LowBlockTime, lowerBound(classes[3]))
+ require.Equal(t, peerRank4LowBlockTime, lowerBound(classes[4]))
require.Equal(t, peerRank0HighBlockTime, upperBound(classes[0]))
require.Equal(t, peerRank1HighBlockTime, upperBound(classes[1]))
require.Equal(t, peerRank2HighBlockTime, upperBound(classes[2]))
require.Equal(t, peerRank3HighBlockTime, upperBound(classes[3]))
+ require.Equal(t, peerRank4HighBlockTime, upperBound(classes[4]))
}
func TestFullResetRequestPenalty(t *testing.T) {
partitiontest.PartitionTest(t)
- class := peerClass{initialRank: 10, peerClass: network.PeersPhonebookArchivers}
+ class := peerClass{initialRank: 0, peerClass: network.PeersPhonebookArchivers}
hs := makeHistoricStatus(10, class)
hs.push(5, 1, class)
require.Equal(t, 1, len(hs.requestGaps))
@@ -524,6 +534,30 @@ func TestFullResetRequestPenalty(t *testing.T) {
require.Equal(t, 0, len(hs.requestGaps))
}
+// TestPenaltyBounds makes sure that the penalty does not demote the peer to a lower class,
+// and resetting the penalty of a demoted peer does not promote it back
+func TestPenaltyBounds(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ class := peerClass{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers}
+ hs := makeHistoricStatus(peerHistoryWindowSize, class)
+ for x := 0; x < 65; x++ {
+ r0 := hs.push(peerRank2LowBlockTime+50, uint64(x+1), class)
+ require.LessOrEqual(t, peerRank2LowBlockTime, r0)
+ require.GreaterOrEqual(t, peerRank2HighBlockTime, r0)
+ }
+
+ r1 := hs.resetRequestPenalty(4, peerRankInitialThirdPriority, class)
+ r2 := hs.resetRequestPenalty(10, peerRankInitialThirdPriority, class)
+ r3 := hs.resetRequestPenalty(10, peerRankDownloadFailed, class)
+
+ // r2 is at a better rank than r1 because it has one penalty less
+ require.Greater(t, r1, r2)
+
+ // r3 stays at the worse rank peerRankDownloadFailed because the peer was demoted, and resetRequestPenalty should not improve it
+ require.Equal(t, peerRankDownloadFailed, r3)
+}
+
// TestClassUpperBound makes sure the peer rank does not exceed the class upper bound
// This was a bug where the resetRequestPenalty was not bounding the returned rank, and was having download failures.
// Initializing rankSamples to 0 makes this works, since the dropped value subtracts 0 from rankSum.
@@ -613,7 +647,7 @@ func TestEvictionAndUpgrade(t *testing.T) {
_, err := peerSelector.getNextPeer()
require.NoError(t, err)
for i := 0; i < 10; i++ {
- if peerSelector.pools[len(peerSelector.pools)-1].rank == 900 {
+ if peerSelector.pools[len(peerSelector.pools)-1].rank == peerRankDownloadFailed {
require.Equal(t, 6, i)
break
}
diff --git a/catchup/service.go b/catchup/service.go
index ac5fa730d..5c08e60e3 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
+ "strings"
"sync"
"sync/atomic"
"time"
@@ -125,7 +126,7 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode
s.parallelBlocks = config.CatchupParallelBlocks
s.deadlineTimeout = agreement.DeadlineTimeout()
s.blockValidationPool = blockValidationPool
- s.syncNow = make(chan struct{})
+ s.syncNow = make(chan struct{}, 1)
return s
}
@@ -159,11 +160,11 @@ func (s *Service) IsSynchronizing() (synchronizing bool, initialSync bool) {
// triggerSync attempts to wake up the sync loop.
func (s *Service) triggerSync() {
- if syncing, initial := s.IsSynchronizing(); !syncing && !initial {
- select {
- case s.syncNow <- struct{}{}:
- default:
- }
+ // Prevents deadlock if periodic sync isn't running
+ // when catchup is setting the sync round.
+ select {
+ case s.syncNow <- struct{}{}:
+ default:
}
}
@@ -744,17 +745,19 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy
if cert.Round == fetchedCert.Round &&
cert.Proposal.BlockDigest != fetchedCert.Proposal.BlockDigest &&
fetchedCert.Authenticate(*block, s.ledger, verifier) == nil {
- s := "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
- s += "!!!!!!!!!! FORK DETECTED !!!!!!!!!!!\n"
- s += "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
- s += "fetchRound called with a cert authenticating block with hash %v.\n"
- s += "We fetched a valid cert authenticating a different block, %v. This indicates a fork.\n\n"
- s += "Cert from our agreement service:\n%#v\n\n"
- s += "Cert from the fetcher:\n%#v\n\n"
- s += "Block from the fetcher:\n%#v\n\n"
- s += "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
- s += "!!!!!!!!!! FORK DETECTED !!!!!!!!!!!\n"
- s += "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
+ var builder strings.Builder
+ builder.WriteString("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
+ builder.WriteString("!!!!!!!!!! FORK DETECTED !!!!!!!!!!!\n")
+ builder.WriteString("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
+ builder.WriteString("fetchRound called with a cert authenticating block with hash %v.\n")
+ builder.WriteString("We fetched a valid cert authenticating a different block, %v. This indicates a fork.\n\n")
+ builder.WriteString("Cert from our agreement service:\n%#v\n\n")
+ builder.WriteString("Cert from the fetcher:\n%#v\n\n")
+ builder.WriteString("Block from the fetcher:\n%#v\n\n")
+ builder.WriteString("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
+ builder.WriteString("!!!!!!!!!! FORK DETECTED !!!!!!!!!!!\n")
+ builder.WriteString("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
+ s := builder.String()
s = fmt.Sprintf(s, cert.Proposal.BlockDigest, fetchedCert.Proposal.BlockDigest, cert, fetchedCert, block)
fmt.Println(s)
logging.Base().Error(s)
@@ -816,15 +819,17 @@ func createPeerSelector(net network.GossipNode, cfg config.Local, pipelineFetch
if cfg.NetAddress != "" { // Relay node
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
- {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers},
+ {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookRelays},
+ {initialRank: peerRankInitialFifthPriority, peerClass: network.PeersConnectedIn},
}
} else {
peerClasses = []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
+ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersConnectedOut},
+ {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookRelays},
}
}
} else {
@@ -832,14 +837,16 @@ func createPeerSelector(net network.GossipNode, cfg config.Local, pipelineFetch
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
- {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookArchivers},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookRelays},
+ {initialRank: peerRankInitialFifthPriority, peerClass: network.PeersPhonebookArchivers},
}
} else {
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
+ {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookArchivers},
}
}
}
@@ -848,13 +855,15 @@ func createPeerSelector(net network.GossipNode, cfg config.Local, pipelineFetch
if cfg.NetAddress != "" { // Relay node
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersConnectedIn},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
+ {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn},
}
} else {
peerClasses = []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
+ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedOut},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
}
}
} else {
@@ -862,12 +871,14 @@ func createPeerSelector(net network.GossipNode, cfg config.Local, pipelineFetch
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookRelays},
}
} else {
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
}
}
}
diff --git a/catchup/service_test.go b/catchup/service_test.go
index 217c23bf7..b0a650132 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -925,30 +925,34 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.NetAddress = "someAddress"
s := MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps := createPeerSelector(s.net, s.cfg, true)
- require.Equal(t, 4, len(ps.peerClasses))
+ require.Equal(t, 5, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
require.Equal(t, peerRankInitialFourthPriority, ps.peerClasses[3].initialRank)
+ require.Equal(t, peerRankInitialFifthPriority, ps.peerClasses[4].initialRank)
require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
- require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[1].peerClass)
- require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
- require.Equal(t, network.PeersConnectedIn, ps.peerClasses[3].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[1].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[2].peerClass)
+ require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[3].peerClass)
+ require.Equal(t, network.PeersConnectedIn, ps.peerClasses[4].peerClass)
// cfg.EnableCatchupFromArchiveServers = true; cfg.NetAddress == ""; pipelineFetch = true;
cfg.EnableCatchupFromArchiveServers = true
cfg.NetAddress = ""
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps = createPeerSelector(s.net, s.cfg, true)
- require.Equal(t, 3, len(ps.peerClasses))
+ require.Equal(t, 4, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
+ require.Equal(t, peerRankInitialFourthPriority, ps.peerClasses[3].initialRank)
- require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[0].peerClass)
- require.Equal(t, network.PeersConnectedOut, ps.peerClasses[1].peerClass)
- require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[0].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[1].peerClass)
+ require.Equal(t, network.PeersConnectedOut, ps.peerClasses[2].peerClass)
+ require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[3].peerClass)
// cfg.EnableCatchupFromArchiveServers = true; cfg.NetAddress != ""; pipelineFetch = false
cfg.EnableCatchupFromArchiveServers = true
@@ -956,16 +960,18 @@ func TestCreatePeerSelector(t *testing.T) {
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps = createPeerSelector(s.net, s.cfg, false)
- require.Equal(t, 4, len(ps.peerClasses))
+ require.Equal(t, 5, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
require.Equal(t, peerRankInitialFourthPriority, ps.peerClasses[3].initialRank)
+ require.Equal(t, peerRankInitialFifthPriority, ps.peerClasses[4].initialRank)
require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
require.Equal(t, network.PeersConnectedIn, ps.peerClasses[1].peerClass)
- require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
- require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[3].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[2].peerClass)
+ require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[3].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[4].peerClass)
// cfg.EnableCatchupFromArchiveServers = true; cfg.NetAddress == ""; pipelineFetch = false
cfg.EnableCatchupFromArchiveServers = true
@@ -973,14 +979,16 @@ func TestCreatePeerSelector(t *testing.T) {
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps = createPeerSelector(s.net, s.cfg, false)
- require.Equal(t, 3, len(ps.peerClasses))
+ require.Equal(t, 4, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
+ require.Equal(t, peerRankInitialFourthPriority, ps.peerClasses[3].initialRank)
require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
- require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[1].peerClass)
- require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[2].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[1].peerClass)
+ require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[3].peerClass)
// cfg.EnableCatchupFromArchiveServers = false; cfg.NetAddress != ""; pipelineFetch = true
cfg.EnableCatchupFromArchiveServers = false
@@ -988,14 +996,16 @@ func TestCreatePeerSelector(t *testing.T) {
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps = createPeerSelector(s.net, s.cfg, true)
- require.Equal(t, 3, len(ps.peerClasses))
+ require.Equal(t, 4, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
+ require.Equal(t, peerRankInitialFourthPriority, ps.peerClasses[3].initialRank)
require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
- require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[1].peerClass)
- require.Equal(t, network.PeersConnectedIn, ps.peerClasses[2].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[1].peerClass)
+ require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
+ require.Equal(t, network.PeersConnectedIn, ps.peerClasses[3].peerClass)
// cfg.EnableCatchupFromArchiveServers = false; cfg.NetAddress == ""; pipelineFetch = true
cfg.EnableCatchupFromArchiveServers = false
@@ -1003,12 +1013,14 @@ func TestCreatePeerSelector(t *testing.T) {
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps = createPeerSelector(s.net, s.cfg, true)
- require.Equal(t, 2, len(ps.peerClasses))
+ require.Equal(t, 3, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
+ require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
- require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
- require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[1].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[0].peerClass)
+ require.Equal(t, network.PeersConnectedOut, ps.peerClasses[1].peerClass)
+ require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
// cfg.EnableCatchupFromArchiveServers = false; cfg.NetAddress != ""; pipelineFetch = false
cfg.EnableCatchupFromArchiveServers = false
@@ -1016,14 +1028,16 @@ func TestCreatePeerSelector(t *testing.T) {
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps = createPeerSelector(s.net, s.cfg, false)
- require.Equal(t, 3, len(ps.peerClasses))
+ require.Equal(t, 4, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
+ require.Equal(t, peerRankInitialFourthPriority, ps.peerClasses[3].initialRank)
require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
require.Equal(t, network.PeersConnectedIn, ps.peerClasses[1].peerClass)
- require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[2].peerClass)
+ require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[3].peerClass)
// cfg.EnableCatchupFromArchiveServers = false; cfg.NetAddress == ""; pipelineFetch = false
cfg.EnableCatchupFromArchiveServers = false
@@ -1031,12 +1045,14 @@ func TestCreatePeerSelector(t *testing.T) {
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps = createPeerSelector(s.net, s.cfg, false)
- require.Equal(t, 2, len(ps.peerClasses))
+ require.Equal(t, 3, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
+ require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
- require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[1].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[1].peerClass)
+ require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
}
func TestServiceStartStop(t *testing.T) {
diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go
index 789ce55c7..c8dd8b9f9 100644
--- a/catchup/universalFetcher.go
+++ b/catchup/universalFetcher.go
@@ -151,17 +151,22 @@ func (w *wsFetcherClient) address() string {
return fmt.Sprintf("[ws] (%s)", w.target.GetAddress())
}
-// requestBlock send a request for block <round> and wait until it receives a response or a context expires.
-func (w *wsFetcherClient) requestBlock(ctx context.Context, round basics.Round) ([]byte, error) {
+// makeBlockRequestTopics builds topics for requesting a block.
+func makeBlockRequestTopics(r basics.Round) network.Topics {
roundBin := make([]byte, binary.MaxVarintLen64)
- binary.PutUvarint(roundBin, uint64(round))
- topics := network.Topics{
+ binary.PutUvarint(roundBin, uint64(r))
+ return network.Topics{
network.MakeTopic(rpcs.RequestDataTypeKey,
[]byte(rpcs.BlockAndCertValue)),
network.MakeTopic(
rpcs.RoundKey,
roundBin),
}
+}
+
+// requestBlock sends a request for block <round> and waits until it receives a response or the context expires.
+func (w *wsFetcherClient) requestBlock(ctx context.Context, round basics.Round) ([]byte, error) {
+ topics := makeBlockRequestTopics(round)
resp, err := w.target.Request(ctx, protocol.UniEnsBlockReqTag, topics)
if err != nil {
return nil, makeErrWsFetcherRequestFailed(round, w.target.GetAddress(), err.Error())
diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go
index 6107a695f..bf74c9570 100644
--- a/catchup/universalFetcher_test.go
+++ b/catchup/universalFetcher_test.go
@@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
+ "math/rand"
"net/http"
"testing"
"time"
@@ -312,3 +313,18 @@ func TestErrorTypes(t *testing.T) {
err6 := errHTTPResponseContentType{contentTypeCount: 1, contentType: "UNDEFINED"}
require.Equal(t, "HTTPFetcher.getBlockBytes: invalid content type: UNDEFINED", err6.Error())
}
+
+// The block request topics message is a hand-rolled msgpack message with a deterministic size. This test ensures
+// that its serialized size matches the constant defined in the protocol package.
+func TestMaxBlockRequestSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ round := rand.Uint64()
+ topics := makeBlockRequestTopics(basics.Round(round))
+ nonce := rand.Uint64() - 1
+ nonceTopic := network.MakeNonceTopic(nonce)
+ topics = append(topics, nonceTopic)
+ serializedMsg := topics.MarshallTopics()
+ require.Equal(t, uint64(len(serializedMsg)), protocol.UniEnsBlockReqTag.MaxMessageSize())
+
+}
diff --git a/cmd/algocfg/profileCommand.go b/cmd/algocfg/profileCommand.go
index 6a80e7d64..6519786e7 100644
--- a/cmd/algocfg/profileCommand.go
+++ b/cmd/algocfg/profileCommand.go
@@ -42,6 +42,8 @@ var (
updateFunc: func(cfg config.Local) config.Local {
cfg.EnableExperimentalAPI = true
cfg.EnableDeveloperAPI = true
+ cfg.MaxAcctLookback = 256
+ cfg.EnableTxnEvalTracer = true
return cfg
},
}
@@ -59,7 +61,6 @@ var (
participation = configUpdater{
description: "Participate in consensus or simply ensure chain health by validating blocks.",
updateFunc: func(cfg config.Local) config.Local {
- cfg.CatchupBlockValidateMode = 0b1100
return cfg
},
}
@@ -142,13 +143,13 @@ var setProfileCmd = &cobra.Command{
reportErrorf("%v", err)
}
file := filepath.Join(dataDir, config.ConfigFilename)
- if _, err := os.Stat(file); !forceUpdate && err == nil {
+ if _, statErr := os.Stat(file); !forceUpdate && statErr == nil {
fmt.Printf("A config.json file already exists at %s\nWould you like to overwrite it? (Y/n)", file)
reader := bufio.NewReader(os.Stdin)
- resp, err := reader.ReadString('\n')
+ resp, readErr := reader.ReadString('\n')
resp = strings.TrimSpace(resp)
- if err != nil {
- reportErrorf("Failed to read response: %v", err)
+ if readErr != nil {
+ reportErrorf("Failed to read response: %v", readErr)
}
if strings.ToLower(resp) == "n" {
reportInfof("Exiting without overwriting existing config.")
diff --git a/cmd/algod/main.go b/cmd/algod/main.go
index 8ff24a970..47bf4fcae 100644
--- a/cmd/algod/main.go
+++ b/cmd/algod/main.go
@@ -19,6 +19,7 @@ package main
import (
"flag"
"fmt"
+ "github.com/algorand/go-algorand/util"
"math/rand"
"os"
"path/filepath"
@@ -158,12 +159,35 @@ func run() int {
}
defer fileLock.Unlock()
+ // Delete legacy indexer.sqlite files if they happen to exist
+ checkAndDeleteIndexerFile := func(fileName string) {
+ indexerDBFilePath := filepath.Join(absolutePath, genesis.ID(), fileName)
+
+ if util.FileExists(indexerDBFilePath) {
+ if idxFileRemoveErr := os.Remove(indexerDBFilePath); idxFileRemoveErr != nil {
+ fmt.Fprintf(os.Stderr, "Error removing %s file from data directory: %v\n", fileName, idxFileRemoveErr)
+ } else {
+ fmt.Fprintf(os.Stdout, "Removed legacy %s file from data directory\n", fileName)
+ }
+ }
+ }
+
+ checkAndDeleteIndexerFile("indexer.sqlite")
+ checkAndDeleteIndexerFile("indexer.sqlite-shm")
+ checkAndDeleteIndexerFile("indexer.sqlite-wal")
+
cfg, err := config.LoadConfigFromDisk(absolutePath)
if err != nil && !os.IsNotExist(err) {
// log is not setup yet, this will log to stderr
log.Fatalf("Cannot load config: %v", err)
}
+ _, err = cfg.ValidateDNSBootstrapArray(genesis.Network)
+ if err != nil {
+ // log is not setup yet, this will log to stderr
+ log.Fatalf("Error validating DNSBootstrap input: %v", err)
+ }
+
err = config.LoadConfigurableConsensusProtocols(absolutePath)
if err != nil {
// log is not setup yet, this will log to stderr
diff --git a/cmd/algofix/doc.go b/cmd/algofix/doc.go
index 057016957..062eb7928 100644
--- a/cmd/algofix/doc.go
+++ b/cmd/algofix/doc.go
@@ -8,6 +8,7 @@ newer ones. After you update to a new Go release, fix helps make
the necessary changes to your programs.
Usage:
+
go tool fix [-r name,...] [path ...]
Without an explicit path, fix reads standard input and writes the
@@ -30,7 +31,7 @@ Fix prints the full list of fixes it can apply in its help output;
to see them, run go tool fix -help.
Fix does not make backup copies of the files that it edits.
-Instead, use a version control system's ``diff'' functionality to inspect
+Instead, use a version control system's “diff” functionality to inspect
the changes that fix makes before committing them.
*/
package main
diff --git a/cmd/algoh/main.go b/cmd/algoh/main.go
index bcca7d567..c57c28ba9 100644
--- a/cmd/algoh/main.go
+++ b/cmd/algoh/main.go
@@ -105,6 +105,11 @@ func main() {
log.Fatalf("Cannot load config: %v", err)
}
+ _, err = algodConfig.ValidateDNSBootstrapArray(genesis.Network)
+ if err != nil {
+ log.Fatalf("Error validating DNSBootstrap input: %v", err)
+ }
+
if _, err := os.Stat(absolutePath); err != nil {
reportErrorf("Data directory %s does not appear to be valid\n", dataDir)
}
diff --git a/cmd/algokey/keyreg.go b/cmd/algokey/keyreg.go
index 02f18649b..439366442 100644
--- a/cmd/algokey/keyreg.go
+++ b/cmd/algokey/keyreg.go
@@ -24,6 +24,7 @@ import (
"strings"
"github.com/spf13/cobra"
+ "golang.org/x/exp/maps"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/account"
@@ -94,10 +95,7 @@ func init() {
"betanet": mustConvertB64ToDigest("mFgazF+2uRS1tMiL9dsj01hJGySEmPN28B/TjjvpVW0="),
"devnet": mustConvertB64ToDigest("sC3P7e2SdbqKJK0tbiCdK9tdSpbe6XeCGKdoNzmlj0E="),
}
- validNetworkList = make([]string, 0, len(validNetworks))
- for k := range validNetworks {
- validNetworkList = append(validNetworkList, k)
- }
+ validNetworkList = maps.Keys(validNetworks)
}
func mustConvertB64ToDigest(b64 string) (digest crypto.Digest) {
diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go
index e99a0b406..2e3580b76 100644
--- a/cmd/catchpointdump/file.go
+++ b/cmd/catchpointdump/file.go
@@ -35,6 +35,7 @@ import (
"github.com/algorand/avm-abi/apps"
cmdutil "github.com/algorand/go-algorand/cmd/util"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger"
@@ -48,11 +49,13 @@ import (
var catchpointFile string
var outFileName string
var excludedFields = cmdutil.MakeCobraStringSliceValue(nil, []string{"version", "catchpoint"})
+var printDigests bool
func init() {
fileCmd.Flags().StringVarP(&catchpointFile, "tar", "t", "", "Specify the catchpoint file (either .tar or .tar.gz) to process")
fileCmd.Flags().StringVarP(&outFileName, "output", "o", "", "Specify an outfile for the dump ( i.e. tracker.dump.txt )")
fileCmd.Flags().BoolVarP(&loadOnly, "load", "l", false, "Load only, do not dump")
+ fileCmd.Flags().BoolVarP(&printDigests, "digest", "d", false, "Print balances and spver digests")
fileCmd.Flags().VarP(excludedFields, "exclude-fields", "e", "List of fields to exclude from the dump: ["+excludedFields.AllowedString()+"]")
}
@@ -206,6 +209,18 @@ func loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.Catc
header, err := tarReader.Next()
if err != nil {
if err == io.EOF {
+ if printDigests {
+ err = catchupAccessor.BuildMerkleTrie(ctx, func(uint64, uint64) {})
+ if err != nil {
+ return fileHeader, err
+ }
+ var balanceHash, spverHash crypto.Digest
+ balanceHash, spverHash, _, err = catchupAccessor.GetVerifyData(ctx)
+ if err != nil {
+ return fileHeader, err
+ }
+ fmt.Printf("accounts digest=%s, spver digest=%s\n\n", balanceHash, spverHash)
+ }
return fileHeader, nil
}
return fileHeader, err
diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go
index e0fdb7f89..77c28870f 100644
--- a/cmd/catchpointdump/net.go
+++ b/cmd/catchpointdump/net.go
@@ -314,8 +314,8 @@ func loadAndDump(addr string, tarFile string, genesisInitState ledgercore.InitSt
}
defer func() {
- if err := deleteLedgerFiles(!loadOnly); err != nil {
- reportWarnf("Error deleting ledger files: %v", err)
+ if delErr := deleteLedgerFiles(!loadOnly); delErr != nil {
+ reportWarnf("Error deleting ledger files: %v", delErr)
}
}()
defer l.Close()
diff --git a/cmd/catchupsrv/download.go b/cmd/catchupsrv/download.go
index af4d7df6b..160605fe5 100644
--- a/cmd/catchupsrv/download.go
+++ b/cmd/catchupsrv/download.go
@@ -175,6 +175,7 @@ func fetcher(server string, wg *sync.WaitGroup) {
wg.Done()
}
+// TODO: We may want to implement conditional fallback to backup bootstrap logic here
func download() {
if *genesisFlag == "" {
panic("Must specify -genesis")
@@ -184,8 +185,9 @@ func download() {
serverList = strings.Split(*serversFlag, ";")
} else if *networkFlag != "" {
cfg := config.GetDefaultLocal()
- bootstrapID := cfg.DNSBootstrap(protocol.NetworkID(*networkFlag))
- _, records, err := net.LookupSRV("algobootstrap", "tcp", bootstrapID)
+ // only using first dnsBootstrap entry (if more than one are configured) and just the primary SRV, not backup
+ dnsBootstrap := cfg.DNSBootstrapArray(protocol.NetworkID(*networkFlag))[0]
+ _, records, err := net.LookupSRV("algobootstrap", "tcp", dnsBootstrap.PrimarySRVBootstrap)
if err != nil {
dnsAddr, err2 := net.ResolveIPAddr("ip", cfg.FallbackDNSResolverAddress)
if err2 != nil {
@@ -195,7 +197,7 @@ func download() {
var resolver tools_network.Resolver
resolver.SetFallbackResolverAddress(*dnsAddr)
- _, records, err = resolver.LookupSRV(context.Background(), "algobootstrap", "tcp", bootstrapID)
+ _, records, err = resolver.LookupSRV(context.Background(), "algobootstrap", "tcp", dnsBootstrap.PrimarySRVBootstrap)
if err != nil {
panic(err)
}
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index c8becdb8f..fcf4fbe09 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -19,7 +19,9 @@ package main
import (
"bufio"
"encoding/base64"
+ "errors"
"fmt"
+ "net/http"
"os"
"path/filepath"
"sort"
@@ -27,11 +29,13 @@ import (
"time"
"github.com/spf13/cobra"
+ "golang.org/x/exp/slices"
"github.com/algorand/go-algorand/cmd/util/datadir"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/passphrase"
+ apiClient "github.com/algorand/go-algorand/daemon/algod/api/client"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
algodAcct "github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
@@ -557,8 +561,7 @@ var infoCmd = &cobra.Command{
func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bool, account model.Account) bool {
var createdAssets []model.Asset
if account.CreatedAssets != nil {
- createdAssets = make([]model.Asset, len(*account.CreatedAssets))
- copy(createdAssets, *account.CreatedAssets)
+ createdAssets = slices.Clone(*account.CreatedAssets)
sort.Slice(createdAssets, func(i, j int) bool {
return createdAssets[i].Index < createdAssets[j].Index
})
@@ -566,8 +569,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
var heldAssets []model.AssetHolding
if account.Assets != nil {
- heldAssets = make([]model.AssetHolding, len(*account.Assets))
- copy(heldAssets, *account.Assets)
+ heldAssets = slices.Clone(*account.Assets)
sort.Slice(heldAssets, func(i, j int) bool {
return heldAssets[i].AssetID < heldAssets[j].AssetID
})
@@ -575,8 +577,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
var createdApps []model.Application
if account.CreatedApps != nil {
- createdApps = make([]model.Application, len(*account.CreatedApps))
- copy(createdApps, *account.CreatedApps)
+ createdApps = slices.Clone(*account.CreatedApps)
sort.Slice(createdApps, func(i, j int) bool {
return createdApps[i].Id < createdApps[j].Id
})
@@ -584,8 +585,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
var optedInApps []model.ApplicationLocalState
if account.AppsLocalState != nil {
- optedInApps = make([]model.ApplicationLocalState, len(*account.AppsLocalState))
- copy(optedInApps, *account.AppsLocalState)
+ optedInApps = slices.Clone(*account.AppsLocalState)
sort.Slice(optedInApps, func(i, j int) bool {
return optedInApps[i].Id < optedInApps[j].Id
})
@@ -632,9 +632,15 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
}
assetParams, err := client.AssetInformation(assetHolding.AssetID)
if err != nil {
- hasError = true
- fmt.Fprintf(errorReport, "Error: Unable to retrieve asset information for asset %d referred to by account %s: %v\n", assetHolding.AssetID, address, err)
- fmt.Fprintf(report, "\tID %d, error\n", assetHolding.AssetID)
+ var httpError apiClient.HTTPError
+ if errors.As(err, &httpError) && httpError.StatusCode == http.StatusNotFound {
+ fmt.Fprintf(report, "\tID %d, <deleted/unknown asset>\n", assetHolding.AssetID)
+ } else {
+ fmt.Fprintf(errorReport, "Error: Unable to retrieve asset information for asset %d referred to by account %s: %v\n", assetHolding.AssetID, address, err)
+ fmt.Fprintf(report, "\tID %d, error\n", assetHolding.AssetID)
+ hasError = true
+ }
+ continue
}
amount := assetDecimalsFmt(assetHolding.Amount, assetParams.Params.Decimals)
@@ -948,16 +954,16 @@ No --delete-input flag specified, exiting without installing key.`)
reportErrorf(errorRequestFail, err)
}
// In an abundance of caution, check for ourselves that the key has been installed.
- if err := client.VerifyParticipationKey(time.Minute, addResponse.PartId); err != nil {
- err = fmt.Errorf("unable to verify key installation. Verify key installation with 'goal account partkeyinfo' and delete '%s', or retry the command. Error: %w", partKeyFile, err)
- reportErrorf(errorRequestFail, err)
+ if vErr := client.VerifyParticipationKey(time.Minute, addResponse.PartId); vErr != nil {
+ vErr = fmt.Errorf("unable to verify key installation. Verify key installation with 'goal account partkeyinfo' and delete '%s', or retry the command. Error: %w", partKeyFile, vErr)
+ reportErrorf(errorRequestFail, vErr)
}
reportInfof("Participation key installed successfully, Participation ID: %s\n", addResponse.PartId)
// Delete partKeyFile
- if nil != os.Remove(partKeyFile) {
- reportErrorf("An error occurred while removing the partkey file, please delete it manually: %s", err)
+ if osErr := os.Remove(partKeyFile); osErr != nil {
+ reportErrorf("An error occurred while removing the partkey file, please delete it manually: %s", osErr)
}
},
}
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index cb048ce50..8d442bf5c 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -220,19 +220,17 @@ type boxRef struct {
// newBoxRef parses a command-line box ref, which is an optional appId, a comma,
// and then the same format as an app call arg.
func newBoxRef(arg string) boxRef {
- parts := strings.SplitN(arg, ":", 2)
- if len(parts) != 2 {
+ encoding, value, found := strings.Cut(arg, ":")
+ if !found {
reportErrorf("box refs should be of the form '[<app>,]encoding:value'")
}
- encoding := parts[0] // tentative, may be <app>,<encoding>
- value := parts[1]
- parts = strings.SplitN(encoding, ",", 2)
appID := uint64(0)
- if len(parts) == 2 {
+
+ if appStr, enc, found := strings.Cut(encoding, ","); found {
// There was a comma in the part before the ":"
- encoding = parts[1]
+ encoding = enc
var err error
- appID, err = strconv.ParseUint(parts[0], 10, 64)
+ appID, err = strconv.ParseUint(appStr, 10, 64)
if err != nil {
reportErrorf("Could not parse app id in box ref: %v", err)
}
@@ -480,14 +478,14 @@ var createAppCmd = &cobra.Command{
if outFilename == "" {
// Broadcast
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
reportInfof("Attempting to create app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
@@ -555,23 +553,23 @@ var updateAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
reportInfof("Attempting to update app (approval size %d, hash %v; clear size %d, hash %v)", len(approvalProg), crypto.HashObj(logic.Program(approvalProg)), len(clearProg), crypto.HashObj(logic.Program(clearProg)))
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- _, err = waitForCommit(client, txid, lv)
- if err != nil {
- reportErrorf(err.Error())
+ _, err2 = waitForCommit(client, txid, lv)
+ if err2 != nil {
+ reportErrorf(err2.Error())
}
}
} else {
@@ -625,23 +623,23 @@ var optInAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
// Report tx details to user
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- _, err = waitForCommit(client, txid, lv)
- if err != nil {
- reportErrorf(err.Error())
+ _, err2 = waitForCommit(client, txid, lv)
+ if err2 != nil {
+ reportErrorf(err2.Error())
}
}
} else {
@@ -695,23 +693,23 @@ var closeOutAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
// Report tx details to user
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- _, err = waitForCommit(client, txid, lv)
- if err != nil {
- reportErrorf(err.Error())
+ _, err2 = waitForCommit(client, txid, lv)
+ if err2 != nil {
+ reportErrorf(err2.Error())
}
}
} else {
@@ -765,23 +763,23 @@ var clearAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
// Report tx details to user
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- _, err = waitForCommit(client, txid, lv)
- if err != nil {
- reportErrorf(err.Error())
+ _, err2 = waitForCommit(client, txid, lv)
+ if err2 != nil {
+ reportErrorf(err2.Error())
}
}
} else {
@@ -835,23 +833,23 @@ var callAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
// Report tx details to user
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- _, err = waitForCommit(client, txid, lv)
- if err != nil {
- reportErrorf(err.Error())
+ _, err2 = waitForCommit(client, txid, lv)
+ if err2 != nil {
+ reportErrorf(err2.Error())
}
}
} else {
@@ -905,23 +903,23 @@ var deleteAppCmd = &cobra.Command{
// Broadcast or write transaction to file
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
// Report tx details to user
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- _, err = waitForCommit(client, txid, lv)
- if err != nil {
- reportErrorf(err.Error())
+ _, err2 = waitForCommit(client, txid, lv)
+ if err2 != nil {
+ reportErrorf(err2.Error())
}
}
} else {
@@ -1310,9 +1308,9 @@ var methodAppCmd = &cobra.Command{
var retType *abi.Type
if retTypeStr != abi.VoidReturnType {
- theRetType, err := abi.TypeOf(retTypeStr)
- if err != nil {
- reportErrorf("cannot cast %s to abi type: %v", retTypeStr, err)
+ theRetType, typeErr := abi.TypeOf(retTypeStr)
+ if typeErr != nil {
+ reportErrorf("cannot cast %s to abi type: %v", retTypeStr, typeErr)
}
retType = &theRetType
}
@@ -1401,9 +1399,9 @@ var methodAppCmd = &cobra.Command{
txnGroup = append(txnGroup, appCallTxn)
if len(txnGroup) > 1 {
// Only if transaction arguments are present, assign group ID
- groupID, err := client.GroupID(txnGroup)
- if err != nil {
- reportErrorf("Cannot assign transaction group ID: %s", err)
+ groupID, gidErr := client.GroupID(txnGroup)
+ if gidErr != nil {
+ reportErrorf("Cannot assign transaction group ID: %s", gidErr)
}
for i := range txnGroup {
txnGroup[i].Group = groupID
@@ -1428,9 +1426,9 @@ var methodAppCmd = &cobra.Command{
continue
}
- signedTxn, err := createSignedTransaction(client, shouldSign, dataDir, walletName, unsignedTxn, txnFromArgs.AuthAddr)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, signErr := createSignedTransaction(client, shouldSign, dataDir, walletName, unsignedTxn, txnFromArgs.AuthAddr)
+ if signErr != nil {
+ reportErrorf(errorSigningTX, signErr)
}
signedTxnGroup = append(signedTxnGroup, signedTxn)
diff --git a/cmd/goal/asset.go b/cmd/goal/asset.go
index f882f85c0..64c2d4c56 100644
--- a/cmd/goal/asset.go
+++ b/cmd/goal/asset.go
@@ -307,14 +307,14 @@ var createAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
// Report tx details to user
@@ -386,23 +386,23 @@ var destroyAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
// Report tx details to user
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- _, err = waitForCommit(client, txid, lastValid)
- if err != nil {
- reportErrorf(err.Error())
+ _, err2 = waitForCommit(client, txid, lastValid)
+ if err2 != nil {
+ reportErrorf(err2.Error())
}
}
} else {
@@ -479,23 +479,23 @@ var configAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
// Report tx details to user
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- _, err = waitForCommit(client, txid, lastValid)
- if err != nil {
- reportErrorf(err.Error())
+ _, err2 = waitForCommit(client, txid, lastValid)
+ if err2 != nil {
+ reportErrorf(err2.Error())
}
}
} else {
@@ -566,23 +566,23 @@ var sendAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
// Report tx details to user
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- _, err = waitForCommit(client, txid, lastValid)
- if err != nil {
- reportErrorf(err.Error())
+ _, err2 = waitForCommit(client, txid, lastValid)
+ if err2 != nil {
+ reportErrorf(err2.Error())
}
}
} else {
@@ -635,23 +635,23 @@ var freezeAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
// Report tx details to user
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- _, err = waitForCommit(client, txid, lastValid)
- if err != nil {
- reportErrorf(err.Error())
+ _, err2 = waitForCommit(client, txid, lastValid)
+ if err2 != nil {
+ reportErrorf(err2.Error())
}
}
} else {
@@ -724,23 +724,23 @@ var optinAssetCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
// Report tx details to user
reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw)
if !noWaitAfterSend {
- _, err = waitForCommit(client, txid, lastValid)
- if err != nil {
- reportErrorf(err.Error())
+ _, err2 = waitForCommit(client, txid, lastValid)
+ if err2 != nil {
+ reportErrorf(err2.Error())
}
}
} else {
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index e27d5e031..d84d43bec 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -73,6 +73,9 @@ var (
simulateAllowMoreLogging bool
simulateAllowMoreOpcodeBudget bool
simulateExtraOpcodeBudget uint64
+ simulateEnableRequestTrace bool
+ simulateStackChange bool
+ simulateScratchChange bool
)
func init() {
@@ -96,7 +99,7 @@ func init() {
sendCmd.Flags().Uint64VarP(&amount, "amount", "a", 0, "The amount to be transferred (required), in microAlgos")
sendCmd.Flags().StringVarP(&closeToAddress, "close-to", "c", "", "Close account and send remainder to this address")
sendCmd.Flags().StringVar(&rekeyToAddress, "rekey-to", "", "Rekey account to the given spending key/address. (Future transactions from this account will need to be signed with the new key.)")
- sendCmd.Flags().StringVarP(&programSource, "from-program", "F", "", "Program source to use as account logic")
+ sendCmd.Flags().StringVarP(&programSource, "from-program", "F", "", "Program source file to use as account logic")
sendCmd.Flags().StringVarP(&progByteFile, "from-program-bytes", "P", "", "Program binary to use as account logic")
sendCmd.Flags().StringSliceVar(&argB64Strings, "argb64", nil, "Base64 encoded args to pass to transaction logic")
sendCmd.Flags().StringVarP(&logicSigFile, "logic-sig", "L", "", "LogicSig to apply to transaction")
@@ -116,7 +119,7 @@ func init() {
signCmd.Flags().StringVarP(&txFilename, "infile", "i", "", "Partially-signed transaction file to add signature to")
signCmd.Flags().StringVarP(&outFilename, "outfile", "o", "", "Filename for writing the signed transaction")
signCmd.Flags().StringVarP(&signerAddress, "signer", "S", "", "Address of key to sign with, if different from transaction \"from\" address due to rekeying")
- signCmd.Flags().StringVarP(&programSource, "program", "p", "", "Program source to use as account logic")
+ signCmd.Flags().StringVarP(&programSource, "program", "p", "", "Program source file to use as account logic")
signCmd.Flags().StringVarP(&logicSigFile, "logic-sig", "L", "", "LogicSig to apply to transaction")
signCmd.Flags().StringSliceVar(&argB64Strings, "argb64", nil, "Base64 encoded args to pass to transaction logic")
signCmd.Flags().StringVarP(&protoVersion, "proto", "P", "", "Consensus protocol version id string")
@@ -161,6 +164,9 @@ func init() {
simulateCmd.Flags().BoolVar(&simulateAllowMoreLogging, "allow-more-logging", false, "Lift the limits on log opcode during simulation")
simulateCmd.Flags().BoolVar(&simulateAllowMoreOpcodeBudget, "allow-more-opcode-budget", false, "Apply max extra opcode budget for apps per transaction group (default 320000) during simulation")
simulateCmd.Flags().Uint64Var(&simulateExtraOpcodeBudget, "extra-opcode-budget", 0, "Apply extra opcode budget for apps per transaction group during simulation")
+ simulateCmd.Flags().BoolVar(&simulateEnableRequestTrace, "trace", false, "Enable simulation time execution trace of app calls")
+ simulateCmd.Flags().BoolVar(&simulateStackChange, "stack", false, "Report stack change during simulation time")
+ simulateCmd.Flags().BoolVar(&simulateScratchChange, "scratch", false, "Report scratch change during simulation time")
}
var clerkCmd = &cobra.Command{
@@ -443,10 +449,10 @@ var sendCmd = &cobra.Command{
}
groupCtx, err := verify.PrepareGroupContext([]transactions.SignedTxn{uncheckedTxn}, &blockHeader, nil)
if err == nil {
- err = verify.LogicSigSanityCheck(&uncheckedTxn, 0, groupCtx)
+ err = verify.LogicSigSanityCheck(0, groupCtx)
}
if err != nil {
- reportErrorf("%s: txn[0] error %s", outFilename, err)
+ reportErrorf("%s: txn error %s", outFilename, err)
}
stx = uncheckedTxn
} else if program != nil {
@@ -628,7 +634,7 @@ var rawsendCmd = &cobra.Command{
}
if txn.ConfirmedRound != nil && *txn.ConfirmedRound > 0 {
- reportInfof(infoTxCommitted, txidStr, txn.ConfirmedRound)
+ reportInfof(infoTxCommitted, txidStr, *txn.ConfirmedRound)
break
}
@@ -848,17 +854,17 @@ var signCmd = &cobra.Command{
reportErrorf("%s: %v", txFilename, err)
}
}
- for i, txn := range txnGroup {
+ for i := range txnGroup {
var signedTxn transactions.SignedTxn
if lsig.Logic != nil {
- err = verify.LogicSigSanityCheck(&txn, i, groupCtx)
+ err = verify.LogicSigSanityCheck(i, groupCtx)
if err != nil {
reportErrorf("%s: txn[%d] error %s", txFilename, txnIndex[txnGroups[group][i]], err)
}
- signedTxn = txn
+ signedTxn = txnGroup[i]
} else {
// sign the usual way
- signedTxn, err = client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, txn.Txn)
+ signedTxn, err = client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, txnGroup[i].Txn)
if err != nil {
reportErrorf(errorSigningTX, err)
}
@@ -1276,6 +1282,7 @@ var simulateCmd = &cobra.Command{
AllowEmptySignatures: simulateAllowEmptySignatures,
AllowMoreLogging: simulateAllowMoreLogging,
ExtraOpcodeBudget: simulateExtraOpcodeBudget,
+ ExecTraceConfig: traceCmdOptionToSimulateTraceConfigModel(),
}
err := writeFile(requestOutFilename, protocol.EncodeJSON(simulateRequest), 0600)
if err != nil {
@@ -1299,6 +1306,7 @@ var simulateCmd = &cobra.Command{
AllowEmptySignatures: simulateAllowEmptySignatures,
AllowMoreLogging: simulateAllowMoreLogging,
ExtraOpcodeBudget: simulateExtraOpcodeBudget,
+ ExecTraceConfig: traceCmdOptionToSimulateTraceConfigModel(),
}
simulateResponse, responseErr = client.SimulateTransactions(simulateRequest)
} else {
@@ -1358,3 +1366,11 @@ func decodeTxnsFromFile(file string) []transactions.SignedTxn {
}
return txgroup
}
+
+func traceCmdOptionToSimulateTraceConfigModel() simulation.ExecTraceConfig {
+ return simulation.ExecTraceConfig{
+ Enable: simulateEnableRequestTrace,
+ Stack: simulateStackChange,
+ Scratch: simulateScratchChange,
+ }
+}
diff --git a/cmd/goal/interact.go b/cmd/goal/interact.go
index fcdcc3953..825d74388 100644
--- a/cmd/goal/interact.go
+++ b/cmd/goal/interact.go
@@ -614,14 +614,14 @@ var appExecuteCmd = &cobra.Command{
if outFilename == "" {
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
- signedTxn, err := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
- if err != nil {
- reportErrorf(errorSigningTX, err)
+ signedTxn, err2 := client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, tx)
+ if err2 != nil {
+ reportErrorf(errorSigningTX, err2)
}
- txid, err := client.BroadcastTransaction(signedTxn)
- if err != nil {
- reportErrorf(errorBroadcastingTX, err)
+ txid, err2 := client.BroadcastTransaction(signedTxn)
+ if err2 != nil {
+ reportErrorf(errorBroadcastingTX, err2)
}
if appIdx == 0 {
diff --git a/cmd/goal/network.go b/cmd/goal/network.go
index ddad6f99d..5c32b68e3 100644
--- a/cmd/goal/network.go
+++ b/cmd/goal/network.go
@@ -98,9 +98,9 @@ var networkCreateCmd = &cobra.Command{
if err != nil {
panic(err)
}
- file, err := os.Open(networkTemplateFile)
- if err != nil {
- reportErrorf(errorCreateNetwork, err)
+ file, osErr := os.Open(networkTemplateFile)
+ if osErr != nil {
+ reportErrorf(errorCreateNetwork, osErr)
}
defer file.Close()
diff --git a/cmd/goal/node.go b/cmd/goal/node.go
index 2b70c2532..7d5f2df14 100644
--- a/cmd/goal/node.go
+++ b/cmd/goal/node.go
@@ -56,7 +56,6 @@ var waitSec uint32
var newNodeNetwork string
var newNodeDestination string
var newNodeArchival bool
-var newNodeIndexer bool
var newNodeRelay string
var newNodeFullConfig bool
var watchMillisecond uint64
@@ -98,7 +97,13 @@ func init() {
createCmd.Flags().StringVar(&newNodeDestination, "destination", "", "Destination path for the new node")
createCmd.Flags().BoolVarP(&newNodeArchival, "archival", "a", localDefaults.Archival, "Make the new node archival, storing all blocks")
createCmd.Flags().BoolVarP(&runUnderHost, "hosted", "H", localDefaults.RunHosted, "Configure the new node to run hosted by algoh")
- createCmd.Flags().BoolVarP(&newNodeIndexer, "indexer", "i", localDefaults.IsIndexerActive, "Configure the new node to enable the indexer feature (implies --archival)")
+
+ // The flag for enabling an internal indexer is now deprecated, but we keep it for backwards compatibility for now.
+ indexerFlagName := "indexer"
+ _ = createCmd.Flags().BoolP(indexerFlagName, "i", false, "")
+ createCmd.Flags().MarkDeprecated(indexerFlagName, "no longer used, please remove from your scripts")
+ createCmd.Flags().MarkShorthandDeprecated(indexerFlagName, "no longer used, please remove from your scripts")
+
createCmd.Flags().StringVar(&newNodeRelay, "relay", localDefaults.NetAddress, "Configure as a relay with specified listening address (NetAddress)")
createCmd.Flags().StringVar(&listenIP, "api", "", "REST API Endpoint")
createCmd.Flags().BoolVar(&newNodeFullConfig, "full-config", false, "Store full config file")
@@ -680,8 +685,7 @@ var createCmd = &cobra.Command{
reportErrorf(errorNodeCreationIPFailure, listenIP)
}
}
- localConfig.Archival = newNodeArchival || newNodeRelay != "" || newNodeIndexer
- localConfig.IsIndexerActive = newNodeIndexer
+ localConfig.Archival = newNodeArchival || newNodeRelay != ""
localConfig.RunHosted = runUnderHost
localConfig.EnableLedgerService = localConfig.Archival
localConfig.EnableBlockService = localConfig.Archival
diff --git a/cmd/goal/tealsign.go b/cmd/goal/tealsign.go
index 33af6ca01..9d9f144da 100644
--- a/cmd/goal/tealsign.go
+++ b/cmd/goal/tealsign.go
@@ -123,14 +123,14 @@ The base64 encoding of the signature will always be printed to stdout. Optionall
if lsigTxnFilename != "" {
// If passed a SignedTxn with a logic sig, compute
// the hash of the program within the logic sig
- stxnBytes, err := os.ReadFile(lsigTxnFilename)
- if err != nil {
- reportErrorf(fileReadError, lsigTxnFilename, err)
+ stxnBytes, err2 := os.ReadFile(lsigTxnFilename)
+ if err2 != nil {
+ reportErrorf(fileReadError, lsigTxnFilename, err2)
}
- err = protocol.Decode(stxnBytes, &stxn)
- if err != nil {
- reportErrorf(txDecodeError, lsigTxnFilename, err)
+ err2 = protocol.Decode(stxnBytes, &stxn)
+ if err2 != nil {
+ reportErrorf(txDecodeError, lsigTxnFilename, err2)
}
// Ensure signed transaction has a logic sig with a
diff --git a/cmd/incorporate/incorporate.go b/cmd/incorporate/incorporate.go
index 38631a101..97d4b44d6 100644
--- a/cmd/incorporate/incorporate.go
+++ b/cmd/incorporate/incorporate.go
@@ -175,7 +175,7 @@ func parseInput() (genesis bookkeeping.Genesis) {
alloc := bookkeeping.GenesisAllocation{
Address: record.Address,
Comment: record.Comment,
- State: basics.AccountData{
+ State: bookkeeping.GenesisAccountData{
Status: record.Status,
MicroAlgos: basics.MicroAlgos{Raw: record.Algos * 1e6},
VoteID: record.VoteID,
diff --git a/cmd/loadgenerator/main.go b/cmd/loadgenerator/main.go
index 594f89380..9b4bb6105 100644
--- a/cmd/loadgenerator/main.go
+++ b/cmd/loadgenerator/main.go
@@ -106,11 +106,11 @@ func main() {
if (cfg.ClientURL == nil || cfg.ClientURL.String() == "") || cfg.APIToken == "" {
if algodDir != "" {
path := filepath.Join(algodDir, "algod.net")
- net, err := os.ReadFile(path)
- maybefail(err, "%s: %v\n", path, err)
+ net, osErr := os.ReadFile(path)
+ maybefail(osErr, "%s: %v\n", path, osErr)
path = filepath.Join(algodDir, "algod.token")
- token, err := os.ReadFile(path)
- maybefail(err, "%s: %v\n", path, err)
+ token, osErr := os.ReadFile(path)
+ maybefail(osErr, "%s: %v\n", path, osErr)
cfg.ClientURL, err = url.Parse(fmt.Sprintf("http://%s", string(strings.TrimSpace(string(net)))))
maybefail(err, "bad net url %v\n", err)
cfg.APIToken = string(token)
diff --git a/cmd/netgoal/generate.go b/cmd/netgoal/generate.go
index 88653df88..4757cdc6a 100644
--- a/cmd/netgoal/generate.go
+++ b/cmd/netgoal/generate.go
@@ -52,6 +52,8 @@ var accountsCount uint64
var assetsCount uint64
var applicationCount uint64
var balRange []string
+var lastPartKeyRound uint64
+var deterministicKeys bool
func init() {
rootCmd.AddCommand(generateCmd)
@@ -77,6 +79,8 @@ func init() {
generateCmd.Flags().Uint64VarP(&assetsCount, "nassets", "", 5, "Asset count")
generateCmd.Flags().Uint64VarP(&applicationCount, "napps", "", 7, "Application Count")
generateCmd.Flags().StringArrayVar(&balRange, "bal", []string{}, "Application Count")
+ generateCmd.Flags().BoolVarP(&deterministicKeys, "deterministic", "", false, "Whether to generate deterministic keys")
+ generateCmd.Flags().Uint64VarP(&lastPartKeyRound, "last-part-key-round", "", gen.DefaultGenesis.LastPartKeyRound, "LastPartKeyRound in genesis.json")
longParts := make([]string, len(generateTemplateLines)+1)
longParts[0] = generateCmd.Long
@@ -184,7 +188,7 @@ template modes for -t:`,
if len(balRange) < 2 {
reportErrorf("must specify account balance range with --bal.")
}
- err = generateAccountsLoadingFileTemplate(outputFilename, sourceWallet, rounds, roundTxnCount, accountsCount, assetsCount, applicationCount, balRange)
+ err = generateAccountsLoadingFileTemplate(outputFilename, sourceWallet, rounds, roundTxnCount, accountsCount, assetsCount, applicationCount, balRange, deterministicKeys)
default:
reportInfoln("Please specify a valid template name.\nSupported templates are:")
for _, line := range generateTemplateLines {
@@ -528,6 +532,9 @@ func generateWalletGenesis(filename string, wallets, npnNodes int) error {
}
func saveGenesisDataToDisk(genesisData gen.GenesisData, filename string) error {
+ if lastPartKeyRound != 0 {
+ genesisData.LastPartKeyRound = lastPartKeyRound
+ }
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err == nil {
defer f.Close()
@@ -538,7 +545,7 @@ func saveGenesisDataToDisk(genesisData gen.GenesisData, filename string) error {
return err
}
-func generateAccountsLoadingFileTemplate(templateFilename, sourceWallet string, rounds, roundTxnCount, accountsCount, assetsCount, applicationCount uint64, balRange []string) error {
+func generateAccountsLoadingFileTemplate(templateFilename, sourceWallet string, rounds, roundTxnCount, accountsCount, assetsCount, applicationCount uint64, balRange []string, deterministicKeys bool) error {
min, err := strconv.ParseInt(balRange[0], 0, 64)
if err != nil {
@@ -557,6 +564,7 @@ func generateAccountsLoadingFileTemplate(templateFilename, sourceWallet string,
GeneratedApplicationCount: applicationCount,
SourceWalletName: sourceWallet,
BalanceRange: []int64{min, max},
+ DeterministicKeys: deterministicKeys,
}
return saveLoadingFileDataToDisk(data, templateFilename)
}
diff --git a/cmd/netgoal/network.go b/cmd/netgoal/network.go
index f0efdc2cd..6fa6b317d 100644
--- a/cmd/netgoal/network.go
+++ b/cmd/netgoal/network.go
@@ -78,7 +78,7 @@ var networkBuildCmd = &cobra.Command{
},
}
-func runBuildNetwork() (err error) {
+func runBuildNetwork() error {
if cpuprofilePath != "" {
f, err := os.Create(cpuprofilePath)
if err != nil {
@@ -93,7 +93,7 @@ func runBuildNetwork() (err error) {
networkRootDir, err := filepath.Abs(networkRootDir)
if err != nil {
- return
+ return err
}
// Make sure target directory doesn't already exist
exists := util.FileExists(networkRootDir)
@@ -109,7 +109,7 @@ func runBuildNetwork() (err error) {
}
if networkRecipeFile, err = filepath.Abs(networkRecipeFile); err != nil {
- return
+ return err
}
var r recipe
@@ -126,8 +126,8 @@ func runBuildNetwork() (err error) {
return fmt.Errorf("error loading Build Config file: %v", err)
}
for _, kev := range miscStringStringTokens {
- ab := strings.SplitN(kev, "=", 2)
- buildConfig.MiscStringString = append(buildConfig.MiscStringString, "{{"+ab[0]+"}}", ab[1])
+ k, v, _ := strings.Cut(kev, "=")
+ buildConfig.MiscStringString = append(buildConfig.MiscStringString, "{{"+k+"}}", v)
}
networkTemplateFile := resolveFile(r.NetworkFile, templateBaseDir)
diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go
index b5e039a2b..9c0bbd86b 100644
--- a/cmd/opdoc/opdoc.go
+++ b/cmd/opdoc/opdoc.go
@@ -482,7 +482,7 @@ func main() {
AVMType: t.AVMType.String(),
})
}
- sort.Slice(named, func(i, j int) bool { return strings.Compare(named[i].Name, named[j].Name) > 0 })
+ sort.Slice(named, func(i, j int) bool { return named[i].Name > named[j].Name })
namedStackTypes := create("named_stack_types.md")
namedStackTypesMarkdown(namedStackTypes, named)
diff --git a/cmd/partitiontest_linter/go.mod b/cmd/partitiontest_linter/go.mod
index 57f9fbb1f..88ea431f9 100644
--- a/cmd/partitiontest_linter/go.mod
+++ b/cmd/partitiontest_linter/go.mod
@@ -1,9 +1,9 @@
module github.com/algorand/go-algorand/cmd/partitiontest_linter
-go 1.17
+go 1.20
-require golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
+require golang.org/x/sys v0.8.0 // indirect
-require golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
+require golang.org/x/mod v0.10.0 // indirect
-require golang.org/x/tools v0.1.12
+require golang.org/x/tools v0.9.3
diff --git a/cmd/partitiontest_linter/go.sum b/cmd/partitiontest_linter/go.sum
index 4965383ee..a5ed5c851 100644
--- a/cmd/partitiontest_linter/go.sum
+++ b/cmd/partitiontest_linter/go.sum
@@ -1,6 +1,7 @@
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
+golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
+golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index fe545f174..ae1db4106 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -153,14 +153,14 @@ var runCmd = &cobra.Command{
reportErrorf("Cannot make temp dir: %v\n", err)
}
if cpuprofile != "" {
- proff, err := os.Create(cpuprofile)
- if err != nil {
- reportErrorf("%s: %v\n", cpuprofile, err)
+ proff, profErr := os.Create(cpuprofile)
+ if profErr != nil {
+ reportErrorf("%s: %v\n", cpuprofile, profErr)
}
defer proff.Close()
- err = pprof.StartCPUProfile(proff)
- if err != nil {
- reportErrorf("%s: StartCPUProfile %v\n", cpuprofile, err)
+ profErr = pprof.StartCPUProfile(proff)
+ if profErr != nil {
+ reportErrorf("%s: StartCPUProfile %v\n", cpuprofile, profErr)
}
defer pprof.StopCPUProfile()
}
@@ -172,18 +172,18 @@ var runCmd = &cobra.Command{
}
if pidFile != "" {
- pidf, err := os.Create(pidFile)
- if err != nil {
- reportErrorf("%s: %v\n", pidFile, err)
+ pidf, pidErr := os.Create(pidFile)
+ if pidErr != nil {
+ reportErrorf("%s: %v\n", pidFile, pidErr)
}
defer os.Remove(pidFile)
- _, err = fmt.Fprintf(pidf, "%d", os.Getpid())
- if err != nil {
- reportErrorf("%s: %v\n", pidFile, err)
+ _, pidErr = fmt.Fprintf(pidf, "%d", os.Getpid())
+ if pidErr != nil {
+ reportErrorf("%s: %v\n", pidFile, pidErr)
}
- err = pidf.Close()
- if err != nil {
- reportErrorf("%s: %v\n", pidFile, err)
+ pidErr = pidf.Close()
+ if pidErr != nil {
+ reportErrorf("%s: %v\n", pidFile, pidErr)
}
}
diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go
index 8e9d9b81f..d3a9e57fc 100644
--- a/cmd/tealdbg/local.go
+++ b/cmd/tealdbg/local.go
@@ -28,6 +28,7 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/apply"
"github.com/algorand/go-algorand/protocol"
+ "golang.org/x/exp/slices"
)
func protoFromString(protoString string) (name string, proto config.ConsensusParams, err error) {
@@ -190,8 +191,7 @@ func (a *AppState) clone() (b AppState) {
b.locals[addr][aid] = tkv.Clone()
}
}
- b.logs = make([]string, len(a.logs))
- copy(b.logs, a.logs)
+ b.logs = slices.Clone(a.logs)
b.innerTxns = cloneInners(a.innerTxns)
return
}
@@ -298,14 +298,15 @@ func determineEvalMode(program []byte, modeIn string) (mode modeType, err error)
// - Sources from command line file names.
// - Programs mentioned in transaction group txnGroup.
// - if DryrunRequest present and no sources or transaction group set in command line then:
-// 1. DryrunRequest.Sources are expanded to DryrunRequest.Apps or DryrunRequest.Txns.
-// 2. DryrunRequest.Apps are expanded into DryrunRequest.Txns.
-// 3. txnGroup is set to DryrunRequest.Txns
+// 1. DryrunRequest.Sources are expanded to DryrunRequest.Apps or DryrunRequest.Txns.
+// 2. DryrunRequest.Apps are expanded into DryrunRequest.Txns.
+// 3. txnGroup is set to DryrunRequest.Txns
+//
// Application search by id:
-// - Balance records from CLI or DryrunRequest.Accounts
-// - If no balance records set in CLI then DryrunRequest.Accounts and DryrunRequest.Apps are used.
-// In this case Accounts data is used as a base for balance records creation,
-// and Apps supply updates to AppParams field.
+// - Balance records from CLI or DryrunRequest.Accounts
+// - If no balance records set in CLI then DryrunRequest.Accounts and DryrunRequest.Apps are used.
+// In this case Accounts data is used as a base for balance records creation,
+// and Apps supply updates to AppParams field.
func (r *LocalRunner) Setup(dp *DebugParams) (err error) {
ddr, err := ddrFromParams(dp)
if err != nil {
diff --git a/cmd/updater/update.sh b/cmd/updater/update.sh
index 94a4b2ba2..6c20d55c2 100755
--- a/cmd/updater/update.sh
+++ b/cmd/updater/update.sh
@@ -537,7 +537,7 @@ function install_new_binaries() {
if [ ! -d ${UPDATESRCDIR}/bin ]; then
return 0
else
- echo "Installing new binary files..."
+ echo "Installing new binary files into ${BINDIR}"
ROLLBACKBIN=1
rm -rf ${BINDIR}/new
mkdir ${BINDIR}/new
diff --git a/components/mocks/mockCatchpointCatchupAccessor.go b/components/mocks/mockCatchpointCatchupAccessor.go
index 0b45f42be..f488879e7 100644
--- a/components/mocks/mockCatchpointCatchupAccessor.go
+++ b/components/mocks/mockCatchpointCatchupAccessor.go
@@ -19,9 +19,11 @@ package mocks
import (
"context"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
)
// MockCatchpointCatchupAccessor is a dummy CatchpointCatchupAccessor implementation which doesn't do anything.
@@ -67,6 +69,11 @@ func (m *MockCatchpointCatchupAccessor) GetCatchupBlockRound(ctx context.Context
return basics.Round(0), nil
}
+// GetVerifyData returns the balances hash, spver hash and totals used by VerifyCatchpoint
+func (m *MockCatchpointCatchupAccessor) GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error) {
+ return crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, nil
+}
+
// VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label.
func (m *MockCatchpointCatchupAccessor) VerifyCatchpoint(ctx context.Context, blk *bookkeeping.Block) (err error) {
return nil
diff --git a/config/config.go b/config/config.go
index 1da9dd059..fc2dd3005 100644
--- a/config/config.go
+++ b/config/config.go
@@ -81,6 +81,14 @@ const ConfigurableConsensusProtocolsFilename = "consensus.json"
// do not expose in normal config so it is not in code generated local_defaults.go
const defaultRelayGossipFanout = 8
+// MaxGenesisIDLen is the maximum length of the genesis ID set for purpose of setting
+// allocbounds on structs containing GenesisID and for purposes of calculating MaxSize functions
+// on those types. Current value is larger than the existing network IDs and the ones used in testing
+const MaxGenesisIDLen = 128
+
+// MaxEvalDeltaTotalLogSize is the maximum size of the sum of all log sizes in a single eval delta.
+const MaxEvalDeltaTotalLogSize = 1024
+
// LoadConfigFromDisk returns a Local config structure based on merging the defaults
// with settings loaded from the config file from the custom dir. If the custom file
// cannot be loaded, the default config is returned (with the error from loading the
diff --git a/config/config_test.go b/config/config_test.go
index 1e24fa459..cc089fc3a 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -23,6 +23,7 @@ import (
"os"
"path/filepath"
"reflect"
+ "regexp"
"strings"
"testing"
@@ -256,7 +257,7 @@ func TestLocal_ConfigMigrate(t *testing.T) {
a := require.New(t)
- c0, err := loadWithoutDefaults(getVersionedDefaultLocalConfig(0))
+ c0, err := loadWithoutDefaults(GetVersionedDefaultLocalConfig(0))
a.NoError(err)
c0, err = migrate(c0)
a.NoError(err)
@@ -271,8 +272,8 @@ func TestLocal_ConfigMigrate(t *testing.T) {
a.Error(err)
// Ensure we don't migrate values that aren't the default old version
- c0Modified := getVersionedDefaultLocalConfig(0)
- c0Modified.BaseLoggerDebugLevel = getVersionedDefaultLocalConfig(0).BaseLoggerDebugLevel + 1
+ c0Modified := GetVersionedDefaultLocalConfig(0)
+ c0Modified.BaseLoggerDebugLevel = GetVersionedDefaultLocalConfig(0).BaseLoggerDebugLevel + 1
c0Modified, err = migrate(c0Modified)
a.NoError(err)
a.NotEqual(defaultLocal, c0Modified)
@@ -310,11 +311,12 @@ func TestLocal_ConfigInvariant(t *testing.T) {
a.NoError(err)
configsPath := filepath.Join(ourPath, "../test/testdata/configs")
- for configVersion := uint32(0); configVersion <= getLatestConfigVersion(); configVersion++ {
+ // for configVersion := uint32(0); configVersion <= getLatestConfigVersion(); configVersion++ {
+ for configVersion := uint32(27); configVersion <= 27; configVersion++ {
c := Local{}
err = codecs.LoadObjectFromFile(filepath.Join(configsPath, fmt.Sprintf("config-v%d.json", configVersion)), &c)
a.NoError(err)
- a.Equal(getVersionedDefaultLocalConfig(configVersion), c)
+ a.Equal(GetVersionedDefaultLocalConfig(configVersion), c)
}
}
@@ -394,22 +396,42 @@ func TestLocal_DNSBootstrapArray(t *testing.T) {
name string
fields fields
args args
- wantBootstrapArray []string
+ wantBootstrapArray []*DNSBootstrap
}{
{name: "test1",
fields: fields{DNSBootstrapID: "<network>.cloudflare.com"},
args: args{networkID: "devnet"},
- wantBootstrapArray: []string{"devnet.cloudflare.com"},
+ wantBootstrapArray: []*DNSBootstrap{{PrimarySRVBootstrap: "devnet.cloudflare.com"}},
},
{name: "test2",
fields: fields{DNSBootstrapID: "<network>.cloudflare.com;<network>.cloudfront.com"},
args: args{networkID: "devnet"},
- wantBootstrapArray: []string{"devnet.cloudflare.com", "devnet.cloudfront.com"},
+ wantBootstrapArray: []*DNSBootstrap{{PrimarySRVBootstrap: "devnet.cloudflare.com"}, {PrimarySRVBootstrap: "devnet.cloudfront.com"}},
},
{name: "test3",
fields: fields{DNSBootstrapID: ""},
args: args{networkID: "devnet"},
- wantBootstrapArray: []string{},
+ wantBootstrapArray: []*DNSBootstrap(nil),
+ },
+ {name: "test4 - intended to mismatch local template",
+ fields: fields{DNSBootstrapID: "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net)"},
+ args: args{networkID: "testnet"},
+ wantBootstrapArray: []*DNSBootstrap{{PrimarySRVBootstrap: "testnet.algorand.network",
+ BackupSRVBootstrap: "testnet.algorand.net",
+ DedupExp: regexp.MustCompile("(algorand-testnet.(network|net))")}},
+ },
+ {name: "test5 - intended to match legacy template",
+ fields: fields{DNSBootstrapID: "<network>.algorand.network"},
+ args: args{networkID: "testnet"},
+ wantBootstrapArray: []*DNSBootstrap{{PrimarySRVBootstrap: "testnet.algorand.network"}},
+ },
+ {name: "test6 - exercise record append with full template",
+ fields: fields{DNSBootstrapID: "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net);<network>.cloudfront.com"},
+ args: args{networkID: "devnet"},
+ wantBootstrapArray: []*DNSBootstrap{{PrimarySRVBootstrap: "devnet.algorand.network",
+ BackupSRVBootstrap: "devnet.algorand.net",
+ DedupExp: regexp.MustCompile("(algorand-devnet.(network|net))")},
+ {PrimarySRVBootstrap: "devnet.cloudfront.com"}},
},
}
for _, tt := range tests {
@@ -420,57 +442,26 @@ func TestLocal_DNSBootstrapArray(t *testing.T) {
if gotBootstrapArray := cfg.DNSBootstrapArray(tt.args.networkID); !reflect.DeepEqual(gotBootstrapArray, tt.wantBootstrapArray) {
t.Errorf("Local.DNSBootstrapArray() = %#v, want %#v", gotBootstrapArray, tt.wantBootstrapArray)
}
+ // handling should be identical to DNSBootstrapArray method for all of these cases
+ if gotBootstrapArray, _ := cfg.ValidateDNSBootstrapArray(tt.args.networkID); !reflect.DeepEqual(gotBootstrapArray, tt.wantBootstrapArray) {
+ t.Errorf("Local.DNSBootstrapArray() = %#v, want %#v", gotBootstrapArray, tt.wantBootstrapArray)
+ }
})
}
}
-func TestLocal_DNSBootstrap(t *testing.T) {
+func TestLocal_ValidateDNSBootstrapArray_StopsOnError(t *testing.T) {
partitiontest.PartitionTest(t)
- t.Parallel()
- type fields struct {
- DNSBootstrapID string
- }
- type args struct {
- network protocol.NetworkID
- }
- tests := []struct {
- name string
- fields fields
- args args
- want string
- }{
- {name: "test1",
- fields: fields{DNSBootstrapID: "<network>.cloudflare.com"},
- args: args{network: "devnet"},
- want: "devnet.cloudflare.com",
- },
- {name: "test2",
- fields: fields{DNSBootstrapID: "<network>.cloudflare.com;"},
- args: args{network: "devnet"},
- want: "devnet.cloudflare.com;",
- },
- {name: "test3",
- fields: fields{DNSBootstrapID: "<network>.cloudflare.com;<network>.cloudfront.com"},
- args: args{network: "devnet"},
- want: "devnet.cloudflare.com;devnet.cloudfront.com",
- },
- {name: "test4",
- fields: fields{DNSBootstrapID: "<network>.cloudflare.com;<network>.cloudfront.com;"},
- args: args{network: "devnet"},
- want: "devnet.cloudflare.com;devnet.cloudfront.com;",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- cfg := Local{
- DNSBootstrapID: tt.fields.DNSBootstrapID,
- }
- if got := cfg.DNSBootstrap(tt.args.network); got != tt.want {
- t.Errorf("Local.DNSBootstrap() = %v, want %v", got, tt.want)
- }
- })
+ var dnsBootstrapIDWithInvalidNameMacroUsage = "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.((network|net)"
+
+ cfg := Local{
+ DNSBootstrapID: dnsBootstrapIDWithInvalidNameMacroUsage,
}
+
+ _, err := cfg.ValidateDNSBootstrapArray(Mainnet)
+
+ assert.ErrorContains(t, err, bootstrapDedupRegexDoesNotCompile)
}
func TestLocal_StructTags(t *testing.T) {
@@ -522,7 +513,7 @@ func TestLocal_GetVersionedDefaultLocalConfig(t *testing.T) {
t.Parallel()
for i := uint32(0); i < getLatestConfigVersion(); i++ {
- localVersion := getVersionedDefaultLocalConfig(i)
+ localVersion := GetVersionedDefaultLocalConfig(i)
require.Equal(t, uint32(i), localVersion.Version)
}
}
diff --git a/config/consensus.go b/config/consensus.go
index 4f4680c5e..01d884863 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -583,13 +583,46 @@ var MaxBytesKeyValueLen int
var MaxExtraAppProgramLen int
// MaxAvailableAppProgramLen is the largest supported app program size include the extra pages
-//supported supported by any of the consensus protocols. used for decoding purposes.
+// supported by any of the consensus protocols, used for decoding purposes.
var MaxAvailableAppProgramLen int
// MaxProposedExpiredOnlineAccounts is the maximum number of online accounts, which need
// to be taken offline, that would be proposed to be taken offline.
var MaxProposedExpiredOnlineAccounts int
+// MaxAppTotalArgLen is the maximum number of bytes across all arguments of an application
+// max sum([len(arg) for arg in txn.ApplicationArgs])
+var MaxAppTotalArgLen int
+
+// MaxAssetNameBytes is the maximum asset name length in bytes
+var MaxAssetNameBytes int
+
+// MaxAssetUnitNameBytes is the maximum asset unit name length in bytes
+var MaxAssetUnitNameBytes int
+
+// MaxAssetURLBytes is the maximum asset URL length in bytes
+var MaxAssetURLBytes int
+
+// MaxAppBytesValueLen is the maximum length of a bytes value used in an application's global or
+// local key/value store
+var MaxAppBytesValueLen int
+
+// MaxAppBytesKeyLen is the maximum length of a key used in an application's global or local
+// key/value store
+var MaxAppBytesKeyLen int
+
+// StateProofTopVoters is a bound on how many online accounts get to
+// participate in forming the state proof, by including the
+// top StateProofTopVoters accounts (by normalized balance) into the
+// vector commitment.
+var StateProofTopVoters int
+
+// MaxTxnBytesPerBlock determines the maximum number of bytes
+// that transactions can take up in a block. Specifically,
+// the sum of the lengths of encodings of each transaction
+// in a block must not exceed MaxTxnBytesPerBlock.
+var MaxTxnBytesPerBlock int
+
func checkSetMax(value int, curMax *int) {
if value > *curMax {
*curMax = value
@@ -627,6 +660,17 @@ func checkSetAllocBounds(p ConsensusParams) {
checkSetMax(p.MaxAppProgramLen, &MaxLogCalls)
checkSetMax(p.MaxInnerTransactions*p.MaxTxGroupSize, &MaxInnerTransactionsPerDelta)
checkSetMax(p.MaxProposedExpiredOnlineAccounts, &MaxProposedExpiredOnlineAccounts)
+
+ // These bounds are exported to make them available to the msgp generator for calculating
+ // maximum valid message size for each message going across the wire.
+ checkSetMax(p.MaxAppTotalArgLen, &MaxAppTotalArgLen)
+ checkSetMax(p.MaxAssetNameBytes, &MaxAssetNameBytes)
+ checkSetMax(p.MaxAssetUnitNameBytes, &MaxAssetUnitNameBytes)
+ checkSetMax(p.MaxAssetURLBytes, &MaxAssetURLBytes)
+ checkSetMax(p.MaxAppBytesValueLen, &MaxAppBytesValueLen)
+ checkSetMax(p.MaxAppKeyLen, &MaxAppBytesKeyLen)
+ checkSetMax(int(p.StateProofTopVoters), &StateProofTopVoters)
+ checkSetMax(p.MaxTxnBytesPerBlock, &MaxTxnBytesPerBlock)
}
// SaveConfigurableConsensus saves the configurable protocols file to the provided data directory.
diff --git a/config/defaultsGenerator/defaultsGenerator.go b/config/defaultsGenerator/defaultsGenerator.go
index 9c3365f89..d497ea9f2 100644
--- a/config/defaultsGenerator/defaultsGenerator.go
+++ b/config/defaultsGenerator/defaultsGenerator.go
@@ -35,6 +35,8 @@ var outputfilename = flag.String("o", "", "Name of the file where the generated
var packageName = flag.String("p", "", "Name of the package.")
var headerFileName = flag.String("h", "", "Name of the header filename")
var jsonExampleFileName = flag.String("j", "", "Name of the json example file")
+var testConfig = flag.String("t", "", "Template name of the test/testdata/configs/config-vXX file")
+var testConfigVersion = flag.Int("tv", 0, "Test config version to write into test/testdata/configs/config-vXX file")
var autoGenHeader = `
// This file was auto generated by ./config/defaultsGenerator/defaultsGenerator.go, and SHOULD NOT BE MODIFIED in any way
@@ -80,6 +82,19 @@ func main() {
if err != nil {
printExit("Unable to write file %s : %v", *jsonExampleFileName, err)
}
+ if *testConfig != "" {
+ configVersion := config.AutogenLocal.Version
+ configBytes := autoDefaultsBytes
+ if *testConfigVersion != 0 {
+ configVersion = uint32(*testConfigVersion)
+ configBytes = []byte(prettyPrint(config.GetVersionedDefaultLocalConfig(configVersion), "json"))
+ }
+ testConfigFilename := fmt.Sprintf(*testConfig, configVersion)
+ err = os.WriteFile(testConfigFilename, configBytes, 0644)
+ if err != nil {
+ printExit("Unable to write file %s : %v", testConfigFilename, err)
+ }
+ }
}
type byFieldName []reflect.StructField
diff --git a/config/dnsbootstrap.go b/config/dnsbootstrap.go
new file mode 100644
index 000000000..2f45d2dd8
--- /dev/null
+++ b/config/dnsbootstrap.go
@@ -0,0 +1,165 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package config
+
+import (
+ "fmt"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// DNSBootstrap represents parsed / validated components derived from a DNSBootstrapID
+type DNSBootstrap struct {
+ PrimarySRVBootstrap string
+
+ // Optional Fields
+ BackupSRVBootstrap string
+
+ // Per documentation, thread-safe save for configuration method
+ DedupExp *regexp.Regexp
+}
+
+var networkBootstrapOverrideMap = map[protocol.NetworkID]DNSBootstrap{
+ Devnet: {
+ PrimarySRVBootstrap: "devnet.algodev.network",
+ BackupSRVBootstrap: "",
+ DedupExp: nil,
+ },
+ Betanet: {
+ PrimarySRVBootstrap: "betanet.algodev.network",
+ BackupSRVBootstrap: "",
+ DedupExp: nil,
+ },
+ Alphanet: {
+ PrimarySRVBootstrap: "alphanet.algodev.network",
+ BackupSRVBootstrap: "",
+ DedupExp: nil,
+ },
+}
+
+var nameExp = regexp.MustCompile(`<name>\.?`)
+
+// Error strings
+const (
+ bootstrapErrorEmpty = "DNSBootstrapID must be non-empty and a valid URL"
+ bootstrapErrorInvalidFormat = "invalid formatted DNSBootstrapID"
+ bootstrapErrorParsingQueryParams = "error parsing query params from DNSBootstrapID"
+ bootstrapErrorInvalidNameMacroUsage = "invalid usage of <name> macro in dedup param; must be at the beginning of the expression"
+ bootstrapDedupRegexDoesNotCompile = "dedup regex does not compile"
+)
+
+// For supported networks, supports template formats like
+// `<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(net|network)`
+
+/**
+ * Validates and parses a DNSBootstrapID into a DNSBootstrap struct. We use Golang's url.ParseQuery as
+ * a convenience to parse out the ID and parameters (as the rules overlap cleanly).
+ *
+ * Non-exhaustive examples of valid formats:
+ *
+ * 1. <network>.algorand.network
+ * 2. myawesomebootstrap-<network>.specialdomain.com
+ * 3. <network>.algorand.network?backup=<network>.algorand.net
+ * 4. <network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(net|network)
+ * 5. https://<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(net|network)
+ * 6. mybootstrap-<network>.sd.com?backup=mybackup-<network>.asd.net&dedup=<name>.md-<network>.(com|net)
+ *
+ * A few notes:
+ * 1. The network parameter to this function is substituted into the dnsBootstrapID anywhere that <network> appears.
+ * 2. The backup parameter's presence in the DNSBootstrapID is optional
+ *
+ * On the dedup mask/expression in particular:
+ * 1. The dedup mask/expression is intended to be used to deduplicate SRV records returned from the primary and backup DNS servers
+ * 2. It is optional, even if backup is present. The dedup mask/expression must be a valid regular expression if set.
+ * 3. If the <name> macro is used in the dedup mask/expression (in most circumstances, recommended), it must be at the beginning of the expression. It is intended as a placeholder for unique server names.
+ *
+ * @param dnsBootstrapID The DNSBootstrapID to parse
+ * @param network The network to substitute into the DNSBootstrapID
+ * @param defaultTemplateOverridden Whether the default template was overridden at runtime
+ * @return A DNSBootstrap struct if successful, error otherwise
+ */
+func parseDNSBootstrap(dnsBootstrapID string, network protocol.NetworkID, defaultTemplateOverridden bool) (*DNSBootstrap, error) {
+ // For several non-mainnet/testnet networks, we essentially ignore the bootstrap and use our own
+ // if template was not overridden
+ if !defaultTemplateOverridden {
+ bootstrap, exists := networkBootstrapOverrideMap[network]
+ if exists {
+ return &bootstrap, nil
+ }
+ }
+
+ // Normalize the dnsBootstrapID and insert the network
+ dnsBootstrapID = strings.Replace(strings.TrimSpace(strings.ToLower(dnsBootstrapID)), "<network>", string(network), -1)
+
+ if dnsBootstrapID == "" {
+ return nil, fmt.Errorf(bootstrapErrorEmpty)
+ }
+
+ parsedTemplate, err := url.Parse(dnsBootstrapID)
+
+ if err != nil || parsedTemplate.Host == "" {
+ // Try parsing with scheme prepended
+ var err2 error
+ parsedTemplate, err2 = url.Parse("https://" + dnsBootstrapID)
+
+ if err2 != nil {
+ return nil, fmt.Errorf("%s: %s, orig error: %s, with scheme error: %s",
+ bootstrapErrorInvalidFormat, dnsBootstrapID, err, err2)
+ }
+ }
+
+ m, err3 := url.ParseQuery(parsedTemplate.RawQuery)
+
+ if err3 != nil {
+ return nil, fmt.Errorf("%s: %s, error: %s", bootstrapErrorParsingQueryParams, dnsBootstrapID, err3)
+ }
+
+ backupBootstrapParam := m["backup"]
+ var backupSRVBootstrap string
+ if len(backupBootstrapParam) != 0 && backupBootstrapParam[0] != "" {
+ backupSRVBootstrap = backupBootstrapParam[0]
+ }
+
+ var dedupExp *regexp.Regexp
+ if backupSRVBootstrap != "" {
+ //dedup mask is optional, even with backup present
+ dedupParam := m["dedup"]
+ if len(dedupParam) != 0 && dedupParam[0] != "" {
+ // If <name> shows up anywhere other than the beginning of the dedup expression, we return an error.
+ nameMacroLocations := nameExp.FindAllStringIndex(dedupParam[0], -1)
+ for _, loc := range nameMacroLocations {
+ if loc[0] != 0 {
+ return nil, fmt.Errorf("%s: %s", bootstrapErrorInvalidNameMacroUsage, dnsBootstrapID)
+ }
+ }
+ // If the string happens to start with <name>, we replace it with an empty string.
+ dedupParam[0] = nameExp.ReplaceAllString(dedupParam[0], "")
+
+ var err4 error
+ dedupExp, err4 = regexp.Compile("(" + dedupParam[0] + ")")
+
+ if err4 != nil {
+ return nil, fmt.Errorf("%s: %s, error: %s", bootstrapDedupRegexDoesNotCompile, dnsBootstrapID, err4)
+ }
+ }
+ }
+
+ return &DNSBootstrap{PrimarySRVBootstrap: parsedTemplate.Host, BackupSRVBootstrap: backupSRVBootstrap, DedupExp: dedupExp}, nil
+}
diff --git a/config/dnsbootstrap_test.go b/config/dnsbootstrap_test.go
new file mode 100644
index 000000000..431d321d0
--- /dev/null
+++ b/config/dnsbootstrap_test.go
@@ -0,0 +1,223 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package config
+
+import (
+ "github.com/algorand/go-algorand/protocol"
+ "pgregory.net/rapid"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/internal/rapidgen"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/assert"
+)
+
+/**
+TODOs:
+* Consider a test with random dedup TLDs?
+*/
+
+func bootstrapParsingNetworkGen() *rapid.Generator[string] {
+ return rapid.OneOf(rapid.StringMatching(string(Testnet)), rapid.StringMatching(string(Mainnet)),
+ rapid.StringMatching(string(Devtestnet)), rapid.StringMatching(string(Devnet)),
+ rapid.StringMatching(string(Betanet)), rapid.StringMatching(string(Alphanet)))
+}
+
+func bootstrapHardCodedNetworkGen() *rapid.Generator[string] {
+ return rapid.OneOf(rapid.StringMatching(string(Devnet)), rapid.StringMatching(string(Betanet)),
+ rapid.StringMatching(string(Alphanet)))
+}
+
+func TestParseDNSBootstrapIDBackupWithExpectedDefaultTemplate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rapid.Check(t, func(t1 *rapid.T) {
+ network := protocol.NetworkID(bootstrapParsingNetworkGen().Draw(t1, "network"))
+
+ var expectedDefaultTemplate = "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net)"
+
+ dnsBootstrap, err := parseDNSBootstrap(expectedDefaultTemplate, network, true)
+
+ assert.NoError(t, err)
+
+ assert.True(t, strings.EqualFold(strings.Replace("<network>.algorand.network", "<network>",
+ string(network), -1), dnsBootstrap.PrimarySRVBootstrap))
+ assert.True(t, strings.EqualFold(strings.Replace("<network>.algorand.net", "<network>",
+ string(network), -1), dnsBootstrap.BackupSRVBootstrap))
+ assert.Equal(t,
+ strings.Replace("(algorand-<network>.(network|net))", "<network>", string(network), -1),
+ dnsBootstrap.DedupExp.String())
+ })
+}
+
+func TestParseDNSBootstrapIDBackupWithHardCodedNetworkBootstraps(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rapid.Check(t, func(t1 *rapid.T) {
+ network := protocol.NetworkID(bootstrapHardCodedNetworkGen().Draw(t1, "network"))
+
+ var expectedDefaultTemplate = "<network>.algorand.network?backup=<network>.algorand.net" +
+ "&dedup=<name>.algorand-<network>.(network|net)"
+
+ dnsBootstrap, err := parseDNSBootstrap(expectedDefaultTemplate, network, false)
+
+ assert.NoError(t, err)
+
+ assert.Equal(t, strings.Replace("<network>.algodev.network", "<network>",
+ string(network), -1), dnsBootstrap.PrimarySRVBootstrap)
+ assert.Equal(t, "", dnsBootstrap.BackupSRVBootstrap)
+ assert.Nil(t, dnsBootstrap.DedupExp)
+ })
+}
+
+func TestParseDNSBootstrapIDWithLegacyTemplate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rapid.Check(t, func(t1 *rapid.T) {
+ network := protocol.NetworkID(bootstrapParsingNetworkGen().Draw(t1, "network"))
+
+ var expectedDefaultTemplate = "<network>.algorand.network"
+
+ dnsBootstrap, err := parseDNSBootstrap(expectedDefaultTemplate, network, true)
+
+ assert.NoError(t, err)
+
+ assert.True(t, strings.EqualFold(strings.Replace(expectedDefaultTemplate, "<network>",
+ string(network), -1), dnsBootstrap.PrimarySRVBootstrap))
+ assert.True(t, strings.EqualFold("", dnsBootstrap.BackupSRVBootstrap))
+ assert.Nil(t, dnsBootstrap.DedupExp)
+ })
+}
+
+func TestParseDNSBootstrapIDNoBackup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rapid.Check(t, func(t1 *rapid.T) {
+ network := protocol.NetworkID(bootstrapParsingNetworkGen().Draw(t1, "network"))
+ domainGen := rapidgen.Domain()
+ primaryDomain := domainGen.Draw(t1, "domain")
+ includeDedup := rapid.Bool().Draw(t1, "with Dedup")
+ includeHTTPS := rapid.Bool().Draw(t1, "with HTTPS")
+
+ primaryDomainInput := primaryDomain
+ // Should be ignored without backup parameter being set
+ if includeDedup {
+ primaryDomainInput += "?dedup=<name>.algorand-<network>.(net|network)"
+ }
+
+ if includeHTTPS {
+ primaryDomainInput = "https://" + primaryDomainInput
+ }
+
+ dnsBootstrap, err := parseDNSBootstrap(primaryDomainInput, network, true)
+
+ assert.NoError(t, err)
+
+ assert.True(t, strings.EqualFold(primaryDomain, dnsBootstrap.PrimarySRVBootstrap))
+ assert.Equal(t, "", dnsBootstrap.BackupSRVBootstrap)
+ assert.Nil(t, dnsBootstrap.DedupExp)
+ })
+}
+
+func TestParseDNSBootstrapIDBackupNoDedup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rapid.Check(t, func(t1 *rapid.T) {
+ network := protocol.NetworkID(bootstrapParsingNetworkGen().Draw(t1, "network"))
+ domainGen := rapidgen.Domain()
+ primaryDomain := domainGen.Draw(t1, "domain")
+ backupDomain := domainGen.Draw(t1, "backupDomain")
+
+ dnsBootstrap, err := parseDNSBootstrap(primaryDomain+"?backup="+backupDomain, network, true)
+
+ assert.NoError(t, err)
+
+ assert.True(t, strings.EqualFold(primaryDomain, dnsBootstrap.PrimarySRVBootstrap))
+ assert.True(t, strings.EqualFold(backupDomain, dnsBootstrap.BackupSRVBootstrap))
+ assert.Nil(t, dnsBootstrap.DedupExp)
+ })
+}
+
+func TestParseDNSBootstrapIDBackupWithSingleDomainDedup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ rapid.Check(t, func(t1 *rapid.T) {
+ network := protocol.NetworkID(bootstrapParsingNetworkGen().Draw(t1, "network"))
+ domainGen := rapidgen.Domain()
+ primaryDomain := domainGen.Draw(t1, "domain")
+ backupDomain := domainGen.Draw(t1, "backupDomain")
+
+ var defaultExpectedDedup = "<name>.algorand-<network>.network"
+ dnsBootstrap, err := parseDNSBootstrap(primaryDomain+"?backup="+backupDomain+"&dedup="+defaultExpectedDedup,
+ network, true)
+
+ assert.NoError(t, err)
+
+ assert.True(t, strings.EqualFold(primaryDomain, dnsBootstrap.PrimarySRVBootstrap))
+ assert.True(t, strings.EqualFold(backupDomain, dnsBootstrap.BackupSRVBootstrap))
+ assert.Equal(t,
+ strings.Replace("(algorand-<network>.network)", "<network>", string(network), -1),
+ dnsBootstrap.DedupExp.String())
+ })
+}
+
+func TestParseDNSBootstrapIDEmptySpaceURLsRejected(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ _, err := parseDNSBootstrap(" ", Testnet, false)
+ assert.EqualError(t, err, bootstrapErrorEmpty)
+
+ _, err2 := parseDNSBootstrap("", Mainnet, false)
+ assert.EqualError(t, err2, bootstrapErrorEmpty)
+}
+
+func TestParseDNSBootstrapIDInvalidURLsRejected(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ _, err := parseDNSBootstrap("algo@%%@api^^.google.com/q?backup=api.google.net", Mainnet, false)
+
+ assert.ErrorContains(t, err, bootstrapErrorInvalidFormat)
+}
+
+func TestParseDNSBootstrapIDInvalidQueryParamsRejected(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ _, err := parseDNSBootstrap("http://api.google.com/q?backup=api.google.net&dedup=%%b", Mainnet, false)
+
+ assert.ErrorContains(t, err, bootstrapErrorParsingQueryParams)
+}
+
+func TestParseDNSBootstrapIDInvalidNameMacroPosition(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var dnsBootstrapIDWithInvalidNameMacroUsage = "<network>.algorand.network?backup=<network>.algorand.net&dedup=algorand-<name>.algorand-<network>.(network|net)"
+
+ _, err := parseDNSBootstrap(dnsBootstrapIDWithInvalidNameMacroUsage, Mainnet, false)
+
+ assert.ErrorContains(t, err, bootstrapErrorInvalidNameMacroUsage)
+}
+
+func TestParseDNSBootstrapIDInvalidDedupRegex(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var dnsBootstrapIDWithInvalidNameMacroUsage = "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.((network|net)"
+
+ _, err := parseDNSBootstrap(dnsBootstrapIDWithInvalidNameMacroUsage, Mainnet, false)
+
+ assert.ErrorContains(t, err, bootstrapDedupRegexDoesNotCompile)
+}
diff --git a/config/localTemplate.go b/config/localTemplate.go
index a9dd313bb..47cfa48ca 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -41,7 +41,7 @@ type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25" version[26]:"26" version[27]:"27"`
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25" version[26]:"26" version[27]:"27" version[28]:"28"`
// environmental (may be overridden)
// When enabled, stores blocks indefinitely, otherwise, only the most recent blocks
@@ -120,7 +120,7 @@ type Local struct {
RestWriteTimeoutSeconds int `version[4]:"120"`
// SRV-based phonebook
- DNSBootstrapID string `version[0]:"<network>.algorand.network"`
+ DNSBootstrapID string `version[0]:"<network>.algorand.network" version[28]:"<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net)"`
// Log file size limit in bytes. When set to 0 logs will be written to stdout.
LogSizeLimit uint64 `version[0]:"1073741824"`
@@ -237,10 +237,6 @@ type Local struct {
// the max size the sync server would return
TxSyncServeResponseSize int `version[3]:"1000000"`
- // IsIndexerActive indicates whether to activate the indexer for fast retrieval of transactions
- // Note -- Indexer cannot operate on non Archival nodes
- IsIndexerActive bool `version[3]:"false"`
-
// UseXForwardedForAddress indicates whether or not the node should use the X-Forwarded-For HTTP Header when
// determining the source of a connection. If used, it should be set to the string "X-Forwarded-For", unless the
// proxy vendor provides another header field. In the case of CloudFlare proxy, the "CF-Connecting-IP" header
@@ -365,7 +361,7 @@ type Local struct {
// MaxCatchpointDownloadDuration defines the maximum duration a client will be keeping the outgoing connection of a catchpoint download request open for processing before
// shutting it down. Networks that have large catchpoint files, slow connection or slow storage could be a good reason to increase this value. Note that this is a client-side only
// configuration value, and it's independent of the actual catchpoint file size.
- MaxCatchpointDownloadDuration time.Duration `version[13]:"7200000000000"`
+ MaxCatchpointDownloadDuration time.Duration `version[13]:"7200000000000" version[28]:"43200000000000"`
// MinCatchpointFileDownloadBytesPerSecond defines the minimal download speed that would be considered to be "acceptable" by the catchpoint file fetcher, measured in bytes per seconds. If the
// provided stream speed drops below this threshold, the connection would be recycled. Note that this field is evaluated per catchpoint "chunk" and not on it's own. If this field is zero,
@@ -510,36 +506,55 @@ type Local struct {
// EnableTxnEvalTracer turns on features in the BlockEvaluator which collect data on transactions, exposing them via algod APIs.
// It will store txn deltas created during block evaluation, potentially consuming much larger amounts of memory,
EnableTxnEvalTracer bool `version[27]:"false"`
+
+ // StorageEngine allows to control which type of storage to use for the ledger.
+ // Available options are:
+ // - sqlite (default)
+ // - pebbledb (experimental, in development)
+ StorageEngine string `version[28]:"sqlite"`
+
+ // TxIncomingFilterMaxSize sets the maximum size for the de-duplication cache used by the incoming tx filter
+ // only relevant if TxIncomingFilteringFlags is non-zero
+ TxIncomingFilterMaxSize uint64 `version[28]:"500000"`
+
+ // BlockServiceMemCap is the memory capacity in bytes which is allowed for the block service to use for HTTP block requests.
+ // When it exceeds this capacity, it redirects the block requests to a different node
+ BlockServiceMemCap uint64 `version[28]:"500000000"`
}
// DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers
-func (cfg Local) DNSBootstrapArray(networkID protocol.NetworkID) (bootstrapArray []string) {
- dnsBootstrapString := cfg.DNSBootstrap(networkID)
- bootstrapArray = strings.Split(dnsBootstrapString, ";")
- // omit zero length entries from the result set.
- for i := len(bootstrapArray) - 1; i >= 0; i-- {
- if len(bootstrapArray[i]) == 0 {
- bootstrapArray = append(bootstrapArray[:i], bootstrapArray[i+1:]...)
- }
- }
- return
+func (cfg Local) DNSBootstrapArray(networkID protocol.NetworkID) []*DNSBootstrap {
+ // Should never return an error here, as the config has already been validated at init
+ result, _ := cfg.internalValidateDNSBootstrapArray(networkID)
+
+ return result
}
-// DNSBootstrap returns the network-specific DNSBootstrap identifier
-func (cfg Local) DNSBootstrap(network protocol.NetworkID) string {
- // if user hasn't modified the default DNSBootstrapID in the configuration
- // file and we're targeting a devnet ( via genesis file ), we the
- // explicit devnet network bootstrap.
- if defaultLocal.DNSBootstrapID == cfg.DNSBootstrapID {
- if network == Devnet {
- return "devnet.algodev.network"
- } else if network == Betanet {
- return "betanet.algodev.network"
- } else if network == Alphanet {
- return "alphanet.algodev.network"
+// ValidateDNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers or an error if any
+// one fails to parse
+func (cfg Local) ValidateDNSBootstrapArray(networkID protocol.NetworkID) ([]*DNSBootstrap, error) {
+ return cfg.internalValidateDNSBootstrapArray(networkID)
+}
+
+// internalValidateDNSBootstrapArray handles the base functionality of parsing the DNSBootstrapID string.
+// The function will return an error on the first failure encountered, or an array of DNSBootstrap entries.
+func (cfg Local) internalValidateDNSBootstrapArray(networkID protocol.NetworkID) (
+ bootstrapArray []*DNSBootstrap, err error) {
+
+ bootstrapStringArray := strings.Split(cfg.DNSBootstrapID, ";")
+ for _, bootstrapString := range bootstrapStringArray {
+ if len(strings.TrimSpace(bootstrapString)) == 0 {
+ continue
}
+
+ bootstrapEntry, err1 := parseDNSBootstrap(bootstrapString, networkID, defaultLocal.DNSBootstrapID != cfg.DNSBootstrapID)
+ if err1 != nil {
+ return nil, err1
+ }
+
+ bootstrapArray = append(bootstrapArray, bootstrapEntry)
}
- return strings.Replace(cfg.DNSBootstrapID, "<network>", string(network), -1)
+ return
}
// SaveToDisk writes the non-default Local settings into a root/ConfigFilename file
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 81e4a2587..a721cd8eb 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -20,7 +20,7 @@
package config
var defaultLocal = Local{
- Version: 27,
+ Version: 28,
AccountUpdatesStatsInterval: 5000000000,
AccountsRebuildSynchronousMode: 1,
AgreementIncomingBundlesQueueLength: 15,
@@ -30,6 +30,7 @@ var defaultLocal = Local{
Archival: false,
BaseLoggerDebugLevel: 4,
BlockServiceCustomFallbackEndpoints: "",
+ BlockServiceMemCap: 500000000,
BroadcastConnectionsLimit: -1,
CadaverDirectory: "",
CadaverSizeTarget: 0,
@@ -45,7 +46,7 @@ var defaultLocal = Local{
CatchupParallelBlocks: 16,
ConnectionsRateLimitingCount: 60,
ConnectionsRateLimitingWindowSeconds: 1,
- DNSBootstrapID: "<network>.algorand.network",
+ DNSBootstrapID: "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net)",
DNSSecurityFlags: 1,
DeadlockDetection: 0,
DeadlockDetectionThreshold: 30,
@@ -87,7 +88,6 @@ var defaultLocal = Local{
IncomingConnectionsLimit: 2400,
IncomingMessageFilterBucketCount: 5,
IncomingMessageFilterBucketSize: 512,
- IsIndexerActive: false,
LedgerSynchronousMode: 2,
LogArchiveMaxAge: "",
LogArchiveName: "node.archive.log",
@@ -95,7 +95,7 @@ var defaultLocal = Local{
MaxAPIBoxPerApplication: 100000,
MaxAPIResourcesPerAccount: 100000,
MaxAcctLookback: 4,
- MaxCatchpointDownloadDuration: 7200000000000,
+ MaxCatchpointDownloadDuration: 43200000000000,
MaxConnectionsPerIP: 15,
MinCatchpointFileDownloadBytesPerSecond: 20480,
NetAddress: "",
@@ -119,6 +119,7 @@ var defaultLocal = Local{
RestReadTimeoutSeconds: 15,
RestWriteTimeoutSeconds: 120,
RunHosted: false,
+ StorageEngine: "sqlite",
SuggestedFeeBlockHistory: 3,
SuggestedFeeSlidingWindowSize: 50,
TLSCertFile: "",
@@ -129,6 +130,7 @@ var defaultLocal = Local{
TxBacklogReservedCapacityPerPeer: 20,
TxBacklogServiceRateWindowSeconds: 10,
TxBacklogSize: 26000,
+ TxIncomingFilterMaxSize: 500000,
TxIncomingFilteringFlags: 1,
TxPoolExponentialIncreaseFactor: 2,
TxPoolSize: 75000,
diff --git a/config/migrate.go b/config/migrate.go
index 11c8aa108..ae9e2a9a1 100644
--- a/config/migrate.go
+++ b/config/migrate.go
@@ -22,12 +22,12 @@ import (
"strconv"
)
-//go:generate $GOROOT/bin/go run ./defaultsGenerator/defaultsGenerator.go -h ../scripts/LICENSE_HEADER -p config -o ./local_defaults.go -j ../installer/config.json.example
+//go:generate $GOROOT/bin/go run ./defaultsGenerator/defaultsGenerator.go -h ../scripts/LICENSE_HEADER -p config -o ./local_defaults.go -j ../installer/config.json.example -t ../test/testdata/configs/config-v%d.json
//go:generate $GOROOT/bin/go fmt local_defaults.go
// AutogenLocal - this variable is the "input" for the config default generator which automatically updates the above defaultLocal variable.
// it's implemented in ./config/defaults_gen.go, and should be the only "consumer" of this exported variable
-var AutogenLocal = getVersionedDefaultLocalConfig(getLatestConfigVersion())
+var AutogenLocal = GetVersionedDefaultLocalConfig(getLatestConfigVersion())
func migrate(cfg Local) (newCfg Local, err error) {
newCfg = cfg
@@ -42,7 +42,7 @@ func migrate(cfg Local) (newCfg Local, err error) {
if newCfg.Version == latestConfigVersion {
break
}
- defaultCurrentConfig := getVersionedDefaultLocalConfig(newCfg.Version)
+ defaultCurrentConfig := GetVersionedDefaultLocalConfig(newCfg.Version)
localType := reflect.TypeOf(Local{})
nextVersion := newCfg.Version + 1
for fieldNum := 0; fieldNum < localType.NumField(); fieldNum++ {
@@ -127,9 +127,10 @@ func getLatestConfigVersion() uint32 {
}
}
-func getVersionedDefaultLocalConfig(version uint32) (local Local) {
+// GetVersionedDefaultLocalConfig returns the default config for the given version.
+func GetVersionedDefaultLocalConfig(version uint32) (local Local) {
if version > 0 {
- local = getVersionedDefaultLocalConfig(version - 1)
+ local = GetVersionedDefaultLocalConfig(version - 1)
}
// apply version specific changes.
localType := reflect.TypeOf(local)
diff --git a/config/version.go b/config/version.go
index 2e274a552..c99256a2f 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 16
+const VersionMinor = 17
// Version is the type holding our full version information.
type Version struct {
diff --git a/crypto/curve25519.go b/crypto/curve25519.go
index e409fc794..58950a3de 100644
--- a/crypto/curve25519.go
+++ b/crypto/curve25519.go
@@ -210,7 +210,6 @@ func (s *SignatureSecrets) SignBytes(message []byte) Signature {
// signed a Hashable message.
//
// It returns true if this is the case; otherwise, it returns false.
-//
func (v SignatureVerifier) Verify(message Hashable, sig Signature) bool {
cryptoSigSecretsVerifyTotal.Inc(nil)
return ed25519Verify(ed25519PublicKey(v), HashRep(message), ed25519Signature(sig))
diff --git a/crypto/digest.go b/crypto/digest.go
index 27bef9a3c..d6f3c2777 100644
--- a/crypto/digest.go
+++ b/crypto/digest.go
@@ -19,6 +19,7 @@ package crypto
import "bytes"
// GenericDigest is a digest that implements CustomSizeDigest, and can be used as hash output.
+//
//msgp:allocbound GenericDigest MaxHashDigestSize
type GenericDigest []byte
diff --git a/crypto/hashes.go b/crypto/hashes.go
index b1a541bba..8933717e4 100644
--- a/crypto/hashes.go
+++ b/crypto/hashes.go
@@ -50,7 +50,7 @@ const (
// a longer output is introduced.
const MaxHashDigestSize = SumhashDigestSize
-//size of each hash
+// size of each hash
const (
Sha512_256Size = sha512.Size256
SumhashDigestSize = sumhash.Sumhash512DigestSize
@@ -58,6 +58,7 @@ const (
)
// HashFactory is responsible for generating new hashes accordingly to the type it stores.
+//
//msgp:postunmarshalcheck HashFactory Validate
type HashFactory struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
diff --git a/crypto/merklearray/layer.go b/crypto/merklearray/layer.go
index 45aa93ff8..5018ae074 100644
--- a/crypto/merklearray/layer.go
+++ b/crypto/merklearray/layer.go
@@ -26,6 +26,7 @@ import (
// A Layer of the Merkle tree consists of a dense array of hashes at that
// level of the tree. Hashes beyond the end of the array (e.g., if the
// number of leaves is not an exact power of 2) are implicitly zero.
+//
//msgp:allocbound Layer MaxNumLeavesOnEncodedTree
type Layer []crypto.GenericDigest
diff --git a/crypto/merklearray/merkle.go b/crypto/merklearray/merkle.go
index b4591bf6b..203d92110 100644
--- a/crypto/merklearray/merkle.go
+++ b/crypto/merklearray/merkle.go
@@ -24,6 +24,7 @@ import (
"sort"
"github.com/algorand/go-algorand/crypto"
+ "golang.org/x/exp/slices"
)
const (
@@ -223,7 +224,7 @@ func (tree *Tree) Prove(idxs []uint64) (*Proof, error) {
idxs = VcIdxs
}
- sort.Slice(idxs, func(i, j int) bool { return idxs[i] < idxs[j] })
+ slices.Sort(idxs)
return tree.createProof(idxs)
}
diff --git a/crypto/merklearray/msgp_gen.go b/crypto/merklearray/msgp_gen.go
index ba7e4a61c..35aa264fb 100644
--- a/crypto/merklearray/msgp_gen.go
+++ b/crypto/merklearray/msgp_gen.go
@@ -16,6 +16,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> LayerMaxSize()
//
// Proof
// |-----> (*) MarshalMsg
@@ -24,6 +25,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProofMaxSize()
//
// SingleLeafProof
// |-----> (*) MarshalMsg
@@ -40,6 +42,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> TreeMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -115,6 +118,13 @@ func (z Layer) MsgIsZero() bool {
return len(z) == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func LayerMaxSize() (s int) {
+ // Calculating size of slice: z
+ s += msgp.ArrayHeaderSize + ((MaxNumLeavesOnEncodedTree) * (crypto.GenericDigestMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Proof) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -320,6 +330,15 @@ func (z *Proof) MsgIsZero() bool {
return (len((*z).Path) == 0) && ((*z).HashFactory.MsgIsZero()) && ((*z).TreeDepth == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func ProofMaxSize() (s int) {
+ s = 1 + 4
+ // Calculating size of slice: z.Path
+ s += msgp.ArrayHeaderSize + ((MaxNumLeavesOnEncodedTree / 2) * (crypto.GenericDigestMaxSize()))
+ s += 4 + crypto.HashFactoryMaxSize() + 3 + msgp.Uint8Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *SingleLeafProof) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -804,3 +823,12 @@ func (z *Tree) Msgsize() (s int) {
func (z *Tree) MsgIsZero() bool {
return (len((*z).Levels) == 0) && ((*z).NumOfElements == 0) && ((*z).Hash.MsgIsZero()) && ((*z).IsVectorCommitment == false)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func TreeMaxSize() (s int) {
+ s = 1 + 5
+ // Calculating size of slice: z.Levels
+ s += msgp.ArrayHeaderSize + ((MaxEncodedTreeDepth + 1) * (MaxNumLeavesOnEncodedTree * (crypto.GenericDigestMaxSize())))
+ s += 3 + msgp.Uint64Size + 4 + crypto.HashFactoryMaxSize() + 3 + msgp.BoolSize
+ return
+}
diff --git a/crypto/merklearray/partial.go b/crypto/merklearray/partial.go
index a1d0861dc..4baf777f3 100644
--- a/crypto/merklearray/partial.go
+++ b/crypto/merklearray/partial.go
@@ -62,6 +62,7 @@ func (s *siblings) get(l uint64, i uint64) (res crypto.GenericDigest, err error)
// partialLayer represents a subset of a Layer (i.e., nodes at some
// level in the Merkle tree). layerItem represents one element in the
// partial Layer.
+//
//msgp:ignore partialLayer
type partialLayer []layerItem
diff --git a/crypto/merklearray/proof.go b/crypto/merklearray/proof.go
index e64479a96..048931203 100644
--- a/crypto/merklearray/proof.go
+++ b/crypto/merklearray/proof.go
@@ -20,11 +20,38 @@ import (
"fmt"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/msgp/msgp"
)
// Proof is used to convince a verifier about membership of leaves: h0,h1...hn
// at indexes i0,i1...in on a tree. The verifier has a trusted value of the tree
// root hash.
+// Path is bounded by MaxNumLeaves since there could be multiple reveals, and
+// given the distribution of the elt positions and the depth of the tree,
+// the path length can increase up to 2^MaxTreeDepth / 2
+//
+// . Consider two different reveals for the same tree:
+// .
+// . z5
+// . z3 z4
+// . y z z1 z2
+// . q r s t u v w x
+// . a b c d e f g h i j k l m n o p
+// . ^
+// . hints: [a, r, z, z4]
+// . len(hints) = 4
+// . You need a to combine with b to get q, need r to combine with the computed q and get y, and so on.
+// . The worst case is this:
+// .
+// . z5
+// . z3 z4
+// . y z z1 z2
+// . q r s t u v w x
+// . a b c d e f g h i j k l m n o p
+// . ^ ^ ^ ^ ^ ^ ^ ^
+// .
+// . hints: [b, d, e, g, j, l, m, o]
+// . len(hints) = 2^4/2
type Proof struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
@@ -41,12 +68,34 @@ type Proof struct {
// SingleLeafProof is used to convince a verifier about membership of a specific
// leaf h at index i on a tree. The verifier has a trusted value of the tree
// root hash. it corresponds to merkle verification path.
+// The msgp directive below tells msgp not to generate a SingleLeafProofMaxSize method since we have one manually defined in this file.
+//
+//msgp:maxsize ignore SingleLeafProof
type SingleLeafProof struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Proof
}
+// ProofMaxSizeByElements returns the maximum msgp encoded size of merklearray.Proof structs containing n signatures
+// This is necessary because the allocbounds on the proof are actual theoretical bounds but for calculating maximum
+// proof size for individual message types we have smaller valid bounds.
+// Exported because it's used in the stateproof package for ensuring that SigPartProof constant is correct size.
+func ProofMaxSizeByElements(n int) (s int) {
+ s = 1 + 4
+ // Calculating size of slice: z.Path
+ s += msgp.ArrayHeaderSize + n*(crypto.GenericDigestMaxSize())
+ s += 4 + crypto.HashFactoryMaxSize() + 3 + msgp.Uint8Size
+ return
+}
+
+// SingleLeafProofMaxSize returns the maximum msgp encoded size of proof verifying a single leaf
+// It is manually defined here instead of letting msgp do it since we cannot annotate the embedded Proof struct
+// with maxtotalbytes for msgp to autogenerate it.
+func SingleLeafProofMaxSize() int {
+ return ProofMaxSizeByElements(MaxEncodedTreeDepth)
+}
+
// GetFixedLengthHashableRepresentation serializes the proof into a sequence of bytes.
// it basically concatenates the elements of the verification path one after another.
// The function returns a fixed length array for each hash function. which is 1 + MaxEncodedTreeDepth * digestsize
diff --git a/crypto/merklearray/proof_test.go b/crypto/merklearray/proof_test.go
index d3683689f..9591d6ee4 100644
--- a/crypto/merklearray/proof_test.go
+++ b/crypto/merklearray/proof_test.go
@@ -244,3 +244,12 @@ func recomputePath(p []byte) []crypto.GenericDigest {
}
return computedPath
}
+
+func TestMaxSizeCalculation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Ensure that the manually generated ProofMaxSizeByElements matches the autogenerated ProofMaxSize() function
+ // If this breaks either the allocbound has changed or the Proof struct definition has changed
+ require.Equal(t, ProofMaxSizeByElements(MaxNumLeavesOnEncodedTree/2), ProofMaxSize())
+}
diff --git a/crypto/merklesignature/merkleSignatureScheme_test.go b/crypto/merklesignature/merkleSignatureScheme_test.go
index 9b56cb11d..b2c82d42d 100644
--- a/crypto/merklesignature/merkleSignatureScheme_test.go
+++ b/crypto/merklesignature/merkleSignatureScheme_test.go
@@ -509,7 +509,7 @@ func TestGetAllKeys(t *testing.T) {
a.Equal(0, len(keys))
}
-//#region Helper Functions
+// #region Helper Functions
func makeSig(signer *Secrets, sigRound uint64, a *require.Assertions) ([]byte, Signature) {
msg := genMsgForTest()
diff --git a/crypto/merklesignature/msgp_gen.go b/crypto/merklesignature/msgp_gen.go
index ae33fa175..cd8a00d4e 100644
--- a/crypto/merklesignature/msgp_gen.go
+++ b/crypto/merklesignature/msgp_gen.go
@@ -6,6 +6,7 @@ import (
"github.com/algorand/msgp/msgp"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklearray"
)
// The following msgp objects are implemented in this file:
@@ -16,6 +17,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> CommitmentMaxSize()
//
// KeyRoundPair
// |-----> (*) MarshalMsg
@@ -24,6 +26,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> KeyRoundPairMaxSize()
//
// Secrets
// |-----> (*) MarshalMsg
@@ -32,6 +35,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SecretsMaxSize()
//
// Signature
// |-----> (*) MarshalMsg
@@ -40,6 +44,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SignatureMaxSize()
//
// SignerContext
// |-----> (*) MarshalMsg
@@ -48,6 +53,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SignerContextMaxSize()
//
// Verifier
// |-----> (*) MarshalMsg
@@ -56,6 +62,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VerifierMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -97,6 +104,13 @@ func (z *Commitment) MsgIsZero() bool {
return (*z) == (Commitment{})
}
+// MaxSize returns a maximum valid message size for this message type
+func CommitmentMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((MerkleSignatureSchemeRootSize) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *KeyRoundPair) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -257,6 +271,13 @@ func (z *KeyRoundPair) MsgIsZero() bool {
return ((*z).Round == 0) && ((*z).Key == nil)
}
+// MaxSize returns a maximum valid message size for this message type
+func KeyRoundPairMaxSize() (s int) {
+ s = 1 + 4 + msgp.Uint64Size + 4
+ s += crypto.FalconSignerMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Secrets) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -409,6 +430,12 @@ func (z *Secrets) MsgIsZero() bool {
return ((*z).SignerContext.FirstValid == 0) && ((*z).SignerContext.KeyLifetime == 0) && ((*z).SignerContext.Tree.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func SecretsMaxSize() (s int) {
+ s = 1 + 3 + msgp.Uint64Size + 3 + msgp.Uint64Size + 5 + merklearray.TreeMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Signature) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -584,6 +611,12 @@ func (z *Signature) MsgIsZero() bool {
return ((*z).Signature.MsgIsZero()) && ((*z).VectorCommitmentIndex == 0) && ((*z).Proof.MsgIsZero()) && ((*z).VerifyingKey.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func SignatureMaxSize() (s int) {
+ s = 1 + 4 + crypto.FalconSignatureMaxSize() + 4 + msgp.Uint64Size + 4 + merklearray.SingleLeafProofMaxSize() + 5 + crypto.FalconVerifierMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *SignerContext) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -736,6 +769,12 @@ func (z *SignerContext) MsgIsZero() bool {
return ((*z).FirstValid == 0) && ((*z).KeyLifetime == 0) && ((*z).Tree.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func SignerContextMaxSize() (s int) {
+ s = 1 + 3 + msgp.Uint64Size + 3 + msgp.Uint64Size + 5 + merklearray.TreeMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Verifier) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -864,3 +903,12 @@ func (z *Verifier) Msgsize() (s int) {
func (z *Verifier) MsgIsZero() bool {
return ((*z).Commitment == (Commitment{})) && ((*z).KeyLifetime == 0)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func VerifierMaxSize() (s int) {
+ s = 1 + 4
+ // Calculating size of array: z.Commitment
+ s += msgp.ArrayHeaderSize + ((MerkleSignatureSchemeRootSize) * (msgp.ByteSize))
+ s += 3 + msgp.Uint64Size
+ return
+}
diff --git a/crypto/merkletrie/cache.go b/crypto/merkletrie/cache.go
index 01648d73a..453e51645 100644
--- a/crypto/merkletrie/cache.go
+++ b/crypto/merkletrie/cache.go
@@ -21,7 +21,9 @@ import (
"encoding/binary"
"errors"
"fmt"
- "sort"
+
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
)
// storedNodeIdentifier is the "equivalent" of a node-ptr, but oriented around persisting the
@@ -446,11 +448,8 @@ func (mtc *merkleTrieCache) reallocatePendingPages(stats *CommitStats) (pagesToC
}
// create a sorted list of created pages
- sortedCreatedPages := make([]uint64, 0, len(createdPages))
- for page := range createdPages {
- sortedCreatedPages = append(sortedCreatedPages, page)
- }
- sort.SliceStable(sortedCreatedPages, func(i, j int) bool { return sortedCreatedPages[i] < sortedCreatedPages[j] })
+ sortedCreatedPages := maps.Keys(createdPages)
+ slices.Sort(sortedCreatedPages)
mtc.reallocatedPages = make(map[uint64]map[storedNodeIdentifier]*node)
diff --git a/crypto/merkletrie/committer.go b/crypto/merkletrie/committer.go
index 5e5ae758a..305ce1f66 100644
--- a/crypto/merkletrie/committer.go
+++ b/crypto/merkletrie/committer.go
@@ -16,6 +16,8 @@
package merkletrie
+import "golang.org/x/exp/slices"
+
// Committer is the interface supporting serializing tries into persistent storage.
type Committer interface {
StorePage(page uint64, content []byte) error
@@ -40,9 +42,7 @@ func (mc *InMemoryCommitter) StorePage(page uint64, content []byte) error {
if content == nil {
delete(mc.memStore, page)
} else {
- storedContent := make([]byte, len(content))
- copy(storedContent, content)
- mc.memStore[page] = storedContent
+ mc.memStore[page] = slices.Clone(content)
}
return nil
}
diff --git a/crypto/merkletrie/committer_test.go b/crypto/merkletrie/committer_test.go
index c8bfd7143..317261f72 100644
--- a/crypto/merkletrie/committer_test.go
+++ b/crypto/merkletrie/committer_test.go
@@ -21,6 +21,7 @@ import (
"testing"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -33,9 +34,7 @@ func (mc *InMemoryCommitter) Duplicate(flat bool) (out *InMemoryCommitter) {
if flat {
out.memStore[k] = v
} else {
- bytes := make([]byte, len(v))
- copy(bytes[:], v[:])
- out.memStore[k] = bytes
+ out.memStore[k] = slices.Clone(v)
}
}
return
diff --git a/crypto/merkletrie/node.go b/crypto/merkletrie/node.go
index de765793d..972ef0e1e 100644
--- a/crypto/merkletrie/node.go
+++ b/crypto/merkletrie/node.go
@@ -23,6 +23,7 @@ import (
"unsafe"
"github.com/algorand/go-algorand/crypto"
+ "golang.org/x/exp/slices"
)
type childEntry struct {
@@ -339,8 +340,7 @@ func deserializeNode(buf []byte) (n *node, s int) {
if hashLength2 <= 0 {
return nil, hashLength2
}
- n.hash = make([]byte, hashLength)
- copy(n.hash, buf[hashLength2:hashLength2+int(hashLength)])
+ n.hash = slices.Clone(buf[hashLength2 : hashLength2+int(hashLength)])
s = hashLength2 + int(hashLength)
isLeaf := (buf[s] == 0)
s++
diff --git a/crypto/msgp_gen.go b/crypto/msgp_gen.go
index fc7539b82..7d7364de8 100644
--- a/crypto/msgp_gen.go
+++ b/crypto/msgp_gen.go
@@ -18,6 +18,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> DigestMaxSize()
//
// FalconPrivateKey
// |-----> (*) MarshalMsg
@@ -26,6 +27,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> FalconPrivateKeyMaxSize()
//
// FalconPublicKey
// |-----> (*) MarshalMsg
@@ -34,6 +36,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> FalconPublicKeyMaxSize()
//
// FalconSeed
// |-----> (*) MarshalMsg
@@ -42,6 +45,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> FalconSeedMaxSize()
//
// FalconSignature
// |-----> MarshalMsg
@@ -50,6 +54,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> FalconSignatureMaxSize()
//
// FalconSigner
// |-----> (*) MarshalMsg
@@ -58,6 +63,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> FalconSignerMaxSize()
//
// FalconVerifier
// |-----> (*) MarshalMsg
@@ -66,6 +72,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> FalconVerifierMaxSize()
//
// GenericDigest
// |-----> MarshalMsg
@@ -74,6 +81,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> GenericDigestMaxSize()
//
// HashFactory
// |-----> (*) MarshalMsg
@@ -82,6 +90,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> HashFactoryMaxSize()
//
// HashType
// |-----> MarshalMsg
@@ -90,6 +99,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> HashTypeMaxSize()
//
// MasterDerivationKey
// |-----> (*) MarshalMsg
@@ -98,6 +108,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> MasterDerivationKeyMaxSize()
//
// MultisigSig
// |-----> (*) MarshalMsg
@@ -106,6 +117,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> MultisigSigMaxSize()
//
// MultisigSubsig
// |-----> (*) MarshalMsg
@@ -114,6 +126,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> MultisigSubsigMaxSize()
//
// OneTimeSignature
// |-----> (*) MarshalMsg
@@ -122,6 +135,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> OneTimeSignatureMaxSize()
//
// OneTimeSignatureSecrets
// |-----> (*) MarshalMsg
@@ -130,6 +144,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> OneTimeSignatureSecretsMaxSize()
//
// OneTimeSignatureSecretsPersistent
// |-----> (*) MarshalMsg
@@ -138,6 +153,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> OneTimeSignatureSecretsPersistentMaxSize()
//
// OneTimeSignatureSubkeyBatchID
// |-----> (*) MarshalMsg
@@ -146,6 +162,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> OneTimeSignatureSubkeyBatchIDMaxSize()
//
// OneTimeSignatureSubkeyOffsetID
// |-----> (*) MarshalMsg
@@ -154,6 +171,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> OneTimeSignatureSubkeyOffsetIDMaxSize()
//
// OneTimeSignatureVerifier
// |-----> (*) MarshalMsg
@@ -162,6 +180,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> OneTimeSignatureVerifierMaxSize()
//
// PrivateKey
// |-----> (*) MarshalMsg
@@ -170,6 +189,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> PrivateKeyMaxSize()
//
// PublicKey
// |-----> (*) MarshalMsg
@@ -178,6 +198,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> PublicKeyMaxSize()
//
// Seed
// |-----> (*) MarshalMsg
@@ -186,6 +207,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SeedMaxSize()
//
// Signature
// |-----> (*) MarshalMsg
@@ -194,6 +216,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SignatureMaxSize()
//
// SignatureSecrets
// |-----> (*) MarshalMsg
@@ -202,6 +225,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SignatureSecretsMaxSize()
//
// VRFSecrets
// |-----> (*) MarshalMsg
@@ -210,6 +234,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VRFSecretsMaxSize()
//
// VrfOutput
// |-----> (*) MarshalMsg
@@ -218,6 +243,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VrfOutputMaxSize()
//
// VrfPrivkey
// |-----> (*) MarshalMsg
@@ -226,6 +252,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VrfPrivkeyMaxSize()
//
// VrfProof
// |-----> (*) MarshalMsg
@@ -234,6 +261,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VrfProofMaxSize()
//
// VrfPubkey
// |-----> (*) MarshalMsg
@@ -242,6 +270,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> VrfPubkeyMaxSize()
//
// ed25519PrivateKey
// |-----> (*) MarshalMsg
@@ -250,6 +279,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> Ed25519PrivateKeyMaxSize()
//
// ed25519PublicKey
// |-----> (*) MarshalMsg
@@ -258,6 +288,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> Ed25519PublicKeyMaxSize()
//
// ed25519Seed
// |-----> (*) MarshalMsg
@@ -266,6 +297,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> Ed25519SeedMaxSize()
//
// ed25519Signature
// |-----> (*) MarshalMsg
@@ -274,6 +306,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> Ed25519SignatureMaxSize()
//
// ephemeralSubkey
// |-----> (*) MarshalMsg
@@ -282,6 +315,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> EphemeralSubkeyMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -323,6 +357,13 @@ func (z *Digest) MsgIsZero() bool {
return (*z) == (Digest{})
}
+// MaxSize returns a maximum valid message size for this message type
+func DigestMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((DigestSize) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *FalconPrivateKey) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -362,6 +403,13 @@ func (z *FalconPrivateKey) MsgIsZero() bool {
return (*z) == (FalconPrivateKey{})
}
+// MaxSize returns a maximum valid message size for this message type
+func FalconPrivateKeyMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((cfalcon.PrivateKeySize) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *FalconPublicKey) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -401,6 +449,13 @@ func (z *FalconPublicKey) MsgIsZero() bool {
return (*z) == (FalconPublicKey{})
}
+// MaxSize returns a maximum valid message size for this message type
+func FalconPublicKeyMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((cfalcon.PublicKeySize) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *FalconSeed) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -440,6 +495,13 @@ func (z *FalconSeed) MsgIsZero() bool {
return (*z) == (FalconSeed{})
}
+// MaxSize returns a maximum valid message size for this message type
+func FalconSeedMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((FalconSeedSize) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z FalconSignature) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -496,6 +558,12 @@ func (z FalconSignature) MsgIsZero() bool {
return len(z) == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func FalconSignatureMaxSize() (s int) {
+ s = msgp.BytesPrefixSize + FalconMaxSignatureSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *FalconSigner) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -625,6 +693,17 @@ func (z *FalconSigner) MsgIsZero() bool {
return ((*z).PublicKey == (FalconPublicKey{})) && ((*z).PrivateKey == (FalconPrivateKey{}))
}
+// MaxSize returns a maximum valid message size for this message type
+func FalconSignerMaxSize() (s int) {
+ s = 1 + 3
+ // Calculating size of array: z.PublicKey
+ s += msgp.ArrayHeaderSize + ((cfalcon.PublicKeySize) * (msgp.ByteSize))
+ s += 3
+ // Calculating size of array: z.PrivateKey
+ s += msgp.ArrayHeaderSize + ((cfalcon.PrivateKeySize) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *FalconVerifier) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -731,6 +810,14 @@ func (z *FalconVerifier) MsgIsZero() bool {
return ((*z).PublicKey == (FalconPublicKey{}))
}
+// MaxSize returns a maximum valid message size for this message type
+func FalconVerifierMaxSize() (s int) {
+ s = 1 + 2
+ // Calculating size of array: z.PublicKey
+ s += msgp.ArrayHeaderSize + ((cfalcon.PublicKeySize) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z GenericDigest) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -787,6 +874,12 @@ func (z GenericDigest) MsgIsZero() bool {
return len(z) == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func GenericDigestMaxSize() (s int) {
+ s = msgp.BytesPrefixSize + MaxHashDigestSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *HashFactory) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -904,6 +997,12 @@ func (z *HashFactory) MsgIsZero() bool {
return ((*z).HashType == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func HashFactoryMaxSize() (s int) {
+ s = 1 + 2 + msgp.Uint16Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z HashType) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -950,6 +1049,12 @@ func (z HashType) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func HashTypeMaxSize() (s int) {
+ s = msgp.Uint16Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *MasterDerivationKey) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -989,6 +1094,13 @@ func (z *MasterDerivationKey) MsgIsZero() bool {
return (*z) == (MasterDerivationKey{})
}
+// MaxSize returns a maximum valid message size for this message type
+func MasterDerivationKeyMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((masterDerivationKeyLenBytes) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *MultisigSig) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1193,6 +1305,14 @@ func (z *MultisigSig) MsgIsZero() bool {
return ((*z).Version == 0) && ((*z).Threshold == 0) && (len((*z).Subsigs) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func MultisigSigMaxSize() (s int) {
+ s = 1 + 2 + msgp.Uint8Size + 4 + msgp.Uint8Size + 7
+ // Calculating size of slice: z.Subsigs
+ s += msgp.ArrayHeaderSize + ((maxMultisig) * (MultisigSubsigMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *MultisigSubsig) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1322,6 +1442,17 @@ func (z *MultisigSubsig) MsgIsZero() bool {
return ((*z).Key == (PublicKey{})) && ((*z).Sig == (Signature{}))
}
+// MaxSize returns a maximum valid message size for this message type
+func MultisigSubsigMaxSize() (s int) {
+ s = 1 + 3
+ // Calculating size of array: z.Key
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 2
+ // Calculating size of array: z.Sig
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *OneTimeSignature) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1501,6 +1632,29 @@ func (z *OneTimeSignature) MsgIsZero() bool {
return ((*z).Sig == (ed25519Signature{})) && ((*z).PK == (ed25519PublicKey{})) && ((*z).PKSigOld == (ed25519Signature{})) && ((*z).PK2 == (ed25519PublicKey{})) && ((*z).PK1Sig == (ed25519Signature{})) && ((*z).PK2Sig == (ed25519Signature{}))
}
+// MaxSize returns a maximum valid message size for this message type
+func OneTimeSignatureMaxSize() (s int) {
+ s = 1 + 2
+ // Calculating size of array: z.Sig
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ s += 2
+ // Calculating size of array: z.PK
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 3
+ // Calculating size of array: z.PKSigOld
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ s += 3
+ // Calculating size of array: z.PK2
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 4
+ // Calculating size of array: z.PK1Sig
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ s += 4
+ // Calculating size of array: z.PK2Sig
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *OneTimeSignatureSecrets) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1831,6 +1985,26 @@ func (z *OneTimeSignatureSecrets) MsgIsZero() bool {
return ((*z).OneTimeSignatureSecretsPersistent.OneTimeSignatureVerifier == (OneTimeSignatureVerifier{})) && ((*z).OneTimeSignatureSecretsPersistent.FirstBatch == 0) && (len((*z).OneTimeSignatureSecretsPersistent.Batches) == 0) && ((*z).OneTimeSignatureSecretsPersistent.FirstOffset == 0) && (len((*z).OneTimeSignatureSecretsPersistent.Offsets) == 0) && ((*z).OneTimeSignatureSecretsPersistent.OffsetsPK2 == (ed25519PublicKey{})) && ((*z).OneTimeSignatureSecretsPersistent.OffsetsPK2Sig == (ed25519Signature{}))
}
+// MaxSize returns a maximum valid message size for this message type
+func OneTimeSignatureSecretsMaxSize() (s int) {
+ s = 1 + 25
+ // Calculating size of array: z.OneTimeSignatureSecretsPersistent.OneTimeSignatureVerifier
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 6 + msgp.Uint64Size + 4
+ // Calculating size of slice: z.OneTimeSignatureSecretsPersistent.Batches
+ panic("Slice z.OneTimeSignatureSecretsPersistent.Batches is unbounded")
+ s += 9 + msgp.Uint64Size + 8
+ // Calculating size of slice: z.OneTimeSignatureSecretsPersistent.Offsets
+ panic("Slice z.OneTimeSignatureSecretsPersistent.Offsets is unbounded")
+ s += 7
+ // Calculating size of array: z.OneTimeSignatureSecretsPersistent.OffsetsPK2
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 10
+ // Calculating size of array: z.OneTimeSignatureSecretsPersistent.OffsetsPK2Sig
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *OneTimeSignatureSecretsPersistent) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2161,6 +2335,26 @@ func (z *OneTimeSignatureSecretsPersistent) MsgIsZero() bool {
return ((*z).OneTimeSignatureVerifier == (OneTimeSignatureVerifier{})) && ((*z).FirstBatch == 0) && (len((*z).Batches) == 0) && ((*z).FirstOffset == 0) && (len((*z).Offsets) == 0) && ((*z).OffsetsPK2 == (ed25519PublicKey{})) && ((*z).OffsetsPK2Sig == (ed25519Signature{}))
}
+// MaxSize returns a maximum valid message size for this message type
+func OneTimeSignatureSecretsPersistentMaxSize() (s int) {
+ s = 1 + 25
+ // Calculating size of array: z.OneTimeSignatureVerifier
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 6 + msgp.Uint64Size + 4
+ // Calculating size of slice: z.Batches
+ panic("Slice z.Batches is unbounded")
+ s += 9 + msgp.Uint64Size + 8
+ // Calculating size of slice: z.Offsets
+ panic("Slice z.Offsets is unbounded")
+ s += 7
+ // Calculating size of array: z.OffsetsPK2
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 10
+ // Calculating size of array: z.OffsetsPK2Sig
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *OneTimeSignatureSubkeyBatchID) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2272,6 +2466,15 @@ func (z *OneTimeSignatureSubkeyBatchID) MsgIsZero() bool {
return ((*z).SubKeyPK == (ed25519PublicKey{})) && ((*z).Batch == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func OneTimeSignatureSubkeyBatchIDMaxSize() (s int) {
+ s = 1 + 3
+ // Calculating size of array: z.SubKeyPK
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 6 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *OneTimeSignatureSubkeyOffsetID) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2400,6 +2603,15 @@ func (z *OneTimeSignatureSubkeyOffsetID) MsgIsZero() bool {
return ((*z).SubKeyPK == (ed25519PublicKey{})) && ((*z).Batch == 0) && ((*z).Offset == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func OneTimeSignatureSubkeyOffsetIDMaxSize() (s int) {
+ s = 1 + 3
+ // Calculating size of array: z.SubKeyPK
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 6 + msgp.Uint64Size + 4 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *OneTimeSignatureVerifier) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2439,6 +2651,13 @@ func (z *OneTimeSignatureVerifier) MsgIsZero() bool {
return (*z) == (OneTimeSignatureVerifier{})
}
+// MaxSize returns a maximum valid message size for this message type
+func OneTimeSignatureVerifierMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *PrivateKey) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2478,6 +2697,13 @@ func (z *PrivateKey) MsgIsZero() bool {
return (*z) == (PrivateKey{})
}
+// MaxSize returns a maximum valid message size for this message type
+func PrivateKeyMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *PublicKey) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2517,6 +2743,13 @@ func (z *PublicKey) MsgIsZero() bool {
return (*z) == (PublicKey{})
}
+// MaxSize returns a maximum valid message size for this message type
+func PublicKeyMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Seed) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2556,6 +2789,13 @@ func (z *Seed) MsgIsZero() bool {
return (*z) == (Seed{})
}
+// MaxSize returns a maximum valid message size for this message type
+func SeedMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Signature) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2595,6 +2835,13 @@ func (z *Signature) MsgIsZero() bool {
return (*z) == (Signature{})
}
+// MaxSize returns a maximum valid message size for this message type
+func SignatureMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *SignatureSecrets) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2604,7 +2851,7 @@ func (z *SignatureSecrets) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendBytes(o, ((*z).SK)[:])
// string "SignatureVerifier"
o = append(o, 0xb1, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72)
- o = (*z).SignatureVerifier.MarshalMsg(o)
+ o = msgp.AppendBytes(o, ((*z).SignatureVerifier)[:])
return
}
@@ -2617,33 +2864,33 @@ func (_ *SignatureSecrets) CanMarshalMsg(z interface{}) bool {
func (z *SignatureSecrets) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0002 int
- var zb0003 bool
- zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0002 > 0 {
- zb0002--
- bts, err = (*z).SignatureVerifier.UnmarshalMsg(bts)
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = msgp.ReadExactBytes(bts, ((*z).SignatureVerifier)[:])
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SignatureVerifier")
return
}
}
- if zb0002 > 0 {
- zb0002--
+ if zb0003 > 0 {
+ zb0003--
bts, err = msgp.ReadExactBytes(bts, ((*z).SK)[:])
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "SK")
return
}
}
- if zb0002 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0002)
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -2654,11 +2901,11 @@ func (z *SignatureSecrets) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0003 {
+ if zb0004 {
(*z) = SignatureSecrets{}
}
- for zb0002 > 0 {
- zb0002--
+ for zb0003 > 0 {
+ zb0003--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -2666,7 +2913,7 @@ func (z *SignatureSecrets) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch string(field) {
case "SignatureVerifier":
- bts, err = (*z).SignatureVerifier.UnmarshalMsg(bts)
+ bts, err = msgp.ReadExactBytes(bts, ((*z).SignatureVerifier)[:])
if err != nil {
err = msgp.WrapError(err, "SignatureVerifier")
return
@@ -2697,13 +2944,24 @@ func (_ *SignatureSecrets) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *SignatureSecrets) Msgsize() (s int) {
- s = 1 + 18 + (*z).SignatureVerifier.Msgsize() + 3 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
+ s = 1 + 18 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
return
}
// MsgIsZero returns whether this is a zero value
func (z *SignatureSecrets) MsgIsZero() bool {
- return ((*z).SignatureVerifier.MsgIsZero()) && ((*z).SK == (ed25519PrivateKey{}))
+ return ((*z).SignatureVerifier == (PublicKey{})) && ((*z).SK == (ed25519PrivateKey{}))
+}
+
+// MaxSize returns a maximum valid message size for this message type
+func SignatureSecretsMaxSize() (s int) {
+ s = 1 + 18
+ // Calculating size of array: z.SignatureVerifier
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 3
+ // Calculating size of array: z.SK
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
}
// MarshalMsg implements msgp.Marshaler
@@ -2817,6 +3075,17 @@ func (z *VRFSecrets) MsgIsZero() bool {
return ((*z).PK == (VrfPubkey{})) && ((*z).SK == (VrfPrivkey{}))
}
+// MaxSize returns a maximum valid message size for this message type
+func VRFSecretsMaxSize() (s int) {
+ s = 1 + 3
+ // Calculating size of array: z.PK
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 3
+ // Calculating size of array: z.SK
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *VrfOutput) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2856,6 +3125,13 @@ func (z *VrfOutput) MsgIsZero() bool {
return (*z) == (VrfOutput{})
}
+// MaxSize returns a maximum valid message size for this message type
+func VrfOutputMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *VrfPrivkey) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2895,6 +3171,13 @@ func (z *VrfPrivkey) MsgIsZero() bool {
return (*z) == (VrfPrivkey{})
}
+// MaxSize returns a maximum valid message size for this message type
+func VrfPrivkeyMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *VrfProof) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2934,6 +3217,13 @@ func (z *VrfProof) MsgIsZero() bool {
return (*z) == (VrfProof{})
}
+// MaxSize returns a maximum valid message size for this message type
+func VrfProofMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((80) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *VrfPubkey) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2973,6 +3263,13 @@ func (z *VrfPubkey) MsgIsZero() bool {
return (*z) == (VrfPubkey{})
}
+// MaxSize returns a maximum valid message size for this message type
+func VrfPubkeyMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *ed25519PrivateKey) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3012,6 +3309,13 @@ func (z *ed25519PrivateKey) MsgIsZero() bool {
return (*z) == (ed25519PrivateKey{})
}
+// MaxSize returns a maximum valid message size for this message type
+func Ed25519PrivateKeyMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *ed25519PublicKey) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3051,6 +3355,13 @@ func (z *ed25519PublicKey) MsgIsZero() bool {
return (*z) == (ed25519PublicKey{})
}
+// MaxSize returns a maximum valid message size for this message type
+func Ed25519PublicKeyMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *ed25519Seed) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3090,6 +3401,13 @@ func (z *ed25519Seed) MsgIsZero() bool {
return (*z) == (ed25519Seed{})
}
+// MaxSize returns a maximum valid message size for this message type
+func Ed25519SeedMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *ed25519Signature) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3129,6 +3447,13 @@ func (z *ed25519Signature) MsgIsZero() bool {
return (*z) == (ed25519Signature{})
}
+// MaxSize returns a maximum valid message size for this message type
+func Ed25519SignatureMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *ephemeralSubkey) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3273,3 +3598,20 @@ func (z *ephemeralSubkey) Msgsize() (s int) {
func (z *ephemeralSubkey) MsgIsZero() bool {
return ((*z).PK == (ed25519PublicKey{})) && ((*z).SK == (ed25519PrivateKey{})) && ((*z).PKSigOld == (ed25519Signature{})) && ((*z).PKSigNew == (ed25519Signature{}))
}
+
+// MaxSize returns a maximum valid message size for this message type
+func EphemeralSubkeyMaxSize() (s int) {
+ s = 1 + 3
+ // Calculating size of array: z.PK
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 3
+ // Calculating size of array: z.SK
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ s += 6
+ // Calculating size of array: z.PKSigOld
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ s += 5
+ // Calculating size of array: z.PKSigNew
+ s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize))
+ return
+}
diff --git a/crypto/multisig.go b/crypto/multisig.go
index d6f19bf4a..92e711d9e 100644
--- a/crypto/multisig.go
+++ b/crypto/multisig.go
@@ -75,6 +75,18 @@ func (msig MultisigSig) Preimage() (version, threshold uint8, pks []PublicKey) {
return msig.Version, msig.Threshold, pks
}
+// Signatures returns the actual number of signatures included in the
+// multisig. That is, the number of subsigs that are not blank.
+func (msig MultisigSig) Signatures() int {
+ sigs := 0
+ for i := range msig.Subsigs {
+ if !msig.Subsigs[i].Sig.Blank() {
+ sigs++
+ }
+ }
+ return sigs
+}
+
const multiSigString = "MultisigAddr"
const maxMultisig = 255
@@ -207,7 +219,7 @@ func MultisigAssemble(unisig []MultisigSig) (msig MultisigSig, err error) {
}
for i := 0; i < len(unisig); i++ {
for j := 0; j < len(unisig[0].Subsigs); j++ {
- if (unisig[i].Subsigs[j].Sig != Signature{}) {
+ if !unisig[i].Subsigs[j].Sig.Blank() {
msig.Subsigs[j].Sig = unisig[i].Subsigs[j].Sig
}
}
@@ -228,7 +240,7 @@ func MultisigVerify(msg Hashable, addr Digest, sig MultisigSig) (err error) {
// MultisigBatchPrep performs checks on the assembled MultisigSig and adds to the batch.
// The caller must call batchVerifier.verify() to verify it.
-func MultisigBatchPrep(msg Hashable, addr Digest, sig MultisigSig, batchVerifier *BatchVerifier) (err error) {
+func MultisigBatchPrep(msg Hashable, addr Digest, sig MultisigSig, batchVerifier *BatchVerifier) error {
// short circuit: if msig doesn't have subsigs or if Subsigs are empty
// then terminate (the upper layer should now verify the unisig)
if (len(sig.Subsigs) == 0 || sig.Subsigs[0] == MultisigSubsig{}) {
@@ -238,7 +250,7 @@ func MultisigBatchPrep(msg Hashable, addr Digest, sig MultisigSig, batchVerifier
// check the address is correct
addrnew, err := MultisigAddrGenWithSubsigs(sig.Version, sig.Threshold, sig.Subsigs)
if err != nil {
- return
+ return err
}
if addr != addrnew {
return errInvalidAddress
@@ -249,37 +261,18 @@ func MultisigBatchPrep(msg Hashable, addr Digest, sig MultisigSig, batchVerifier
return errInvalidNumberOfSignature
}
- // check that we don't have too few multisig subsigs
- if len(sig.Subsigs) < int(sig.Threshold) {
- return errInvalidNumberOfSignature
- }
-
// checks the number of non-blank signatures is no less than threshold
- var counter uint8
- for _, subsigi := range sig.Subsigs {
- if (subsigi.Sig != Signature{}) {
- counter++
- }
- }
- if counter < sig.Threshold {
+ if sig.Signatures() < int(sig.Threshold) {
return errInvalidNumberOfSignature
}
- // checks individual signature verifies
- var sigCount int
+ // queues individual signature verifies
for _, subsigi := range sig.Subsigs {
- if (subsigi.Sig != Signature{}) {
+ if !subsigi.Sig.Blank() {
batchVerifier.EnqueueSignature(subsigi.Key, msg, subsigi.Sig)
- sigCount++
}
}
-
- // sanity check. if we get here then every non-blank subsig should have
- // been verified successfully, and we should have had enough of them
- if sigCount < int(sig.Threshold) {
- return errInvalidNumberOfSignature
- }
- return
+ return nil
}
// MultisigAdd adds unisig to an existing msig
@@ -315,8 +308,8 @@ func MultisigAdd(unisig []MultisigSig, msig *MultisigSig) (err error) {
// update the msig
for i := 0; i < len(unisig); i++ {
for j := 0; j < len(msig.Subsigs); j++ {
- if (unisig[i].Subsigs[j].Sig != Signature{}) {
- if (msig.Subsigs[j].Sig == Signature{}) {
+ if !unisig[i].Subsigs[j].Sig.Blank() {
+ if msig.Subsigs[j].Sig.Blank() {
// add the signature
msig.Subsigs[j].Sig = unisig[i].Subsigs[j].Sig
} else if msig.Subsigs[j].Sig != unisig[i].Subsigs[j].Sig {
@@ -354,13 +347,13 @@ func MultisigMerge(msig1 MultisigSig, msig2 MultisigSig) (msigt MultisigSig, err
msigt.Subsigs = make([]MultisigSubsig, len(msig1.Subsigs))
for i := 0; i < len(msigt.Subsigs); i++ {
msigt.Subsigs[i].Key = msig1.Subsigs[i].Key
- if (msig1.Subsigs[i].Sig == Signature{}) {
- if (msig2.Subsigs[i].Sig != Signature{}) {
+ if msig1.Subsigs[i].Sig.Blank() {
+ if !msig2.Subsigs[i].Sig.Blank() {
// update signature with msig2's signature
msigt.Subsigs[i].Sig = msig2.Subsigs[i].Sig
}
- } else if (msig2.Subsigs[i].Sig == Signature{} || // msig2's sig is empty
- msig2.Subsigs[i].Sig == msig1.Subsigs[i].Sig) { // valid duplicates
+ } else if msig2.Subsigs[i].Sig.Blank() || // msig2's sig is empty
+ msig2.Subsigs[i].Sig == msig1.Subsigs[i].Sig { // valid duplicates
// update signature with msig1's signature
msigt.Subsigs[i].Sig = msig1.Subsigs[i].Sig
} else {
diff --git a/crypto/multisig_test.go b/crypto/multisig_test.go
index e7f4a17da..2f630f7fc 100644
--- a/crypto/multisig_test.go
+++ b/crypto/multisig_test.go
@@ -39,7 +39,6 @@ func MultisigSigPrint(sig MultisigSig) {
// test cases for address generation
// detect invalid threshold and versions
-//
func TestMultisigAddr(t *testing.T) {
partitiontest.PartitionTest(t)
var s Seed
diff --git a/crypto/secp256k1/curve.go b/crypto/secp256k1/curve.go
index 59e7100c5..ade176740 100644
--- a/crypto/secp256k1/curve.go
+++ b/crypto/secp256k1/curve.go
@@ -106,7 +106,7 @@ func (BitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool {
return x3.Cmp(y2) == 0
}
-//TODO: double check if the function is okay
+// TODO: double check if the function is okay
// affineFromJacobian reverses the Jacobian transform. See the comment at the
// top of the file.
func (BitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
diff --git a/crypto/stateproof/committableSignatureSlot.go b/crypto/stateproof/committableSignatureSlot.go
index 7ae0e8ac5..2c296e75f 100644
--- a/crypto/stateproof/committableSignatureSlot.go
+++ b/crypto/stateproof/committableSignatureSlot.go
@@ -37,6 +37,7 @@ var ErrIndexOutOfBound = errors.New("index is out of bound")
// committableSignatureSlotArray is used to create a merkle tree on the stateproof's
// signature array. it serializes the MSS signatures using a specific format
// state proof signature array.
+//
//msgp:ignore committableSignatureSlotArray
type committableSignatureSlotArray []sigslot
diff --git a/crypto/stateproof/msgp_gen.go b/crypto/stateproof/msgp_gen.go
index 8d4d8a350..94c84f000 100644
--- a/crypto/stateproof/msgp_gen.go
+++ b/crypto/stateproof/msgp_gen.go
@@ -7,7 +7,9 @@ import (
"github.com/algorand/msgp/msgp"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/data/basics"
)
@@ -19,6 +21,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> MessageHashMaxSize()
//
// Prover
// |-----> (*) MarshalMsg
@@ -27,6 +30,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProverMaxSize()
//
// ProverPersistedFields
// |-----> (*) MarshalMsg
@@ -35,6 +39,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ProverPersistedFieldsMaxSize()
//
// Reveal
// |-----> (*) MarshalMsg
@@ -43,6 +48,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> RevealMaxSize()
//
// StateProof
// |-----> (*) MarshalMsg
@@ -51,6 +57,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> StateProofMaxSize()
//
// sigslotCommit
// |-----> (*) MarshalMsg
@@ -59,6 +66,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SigslotCommitMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -100,6 +108,13 @@ func (z *MessageHash) MsgIsZero() bool {
return (*z) == (MessageHash{})
}
+// MaxSize returns a maximum valid message size for this message type
+func MessageHashMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Prover) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -429,6 +444,20 @@ func (z *Prover) MsgIsZero() bool {
return ((*z).ProverPersistedFields.Data == (MessageHash{})) && ((*z).ProverPersistedFields.Round == 0) && (len((*z).ProverPersistedFields.Participants) == 0) && ((*z).ProverPersistedFields.Parttree == nil) && ((*z).ProverPersistedFields.LnProvenWeight == 0) && ((*z).ProverPersistedFields.ProvenWeight == 0) && ((*z).ProverPersistedFields.StrengthTarget == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func ProverMaxSize() (s int) {
+ s = 1 + 5
+ // Calculating size of array: z.ProverPersistedFields.Data
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 4 + msgp.Uint64Size + 6
+ // Calculating size of slice: z.ProverPersistedFields.Participants
+ s += msgp.ArrayHeaderSize + ((VotersAllocBound) * (basics.ParticipantMaxSize()))
+ s += 9
+ s += merklearray.TreeMaxSize()
+ s += 6 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *ProverPersistedFields) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -758,6 +787,20 @@ func (z *ProverPersistedFields) MsgIsZero() bool {
return ((*z).Data == (MessageHash{})) && ((*z).Round == 0) && (len((*z).Participants) == 0) && ((*z).Parttree == nil) && ((*z).LnProvenWeight == 0) && ((*z).ProvenWeight == 0) && ((*z).StrengthTarget == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func ProverPersistedFieldsMaxSize() (s int) {
+ s = 1 + 5
+ // Calculating size of array: z.Data
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 4 + msgp.Uint64Size + 6
+ // Calculating size of slice: z.Participants
+ s += msgp.ArrayHeaderSize + ((VotersAllocBound) * (basics.ParticipantMaxSize()))
+ s += 9
+ s += merklearray.TreeMaxSize()
+ s += 6 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Reveal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1037,6 +1080,12 @@ func (z *Reveal) MsgIsZero() bool {
return (((*z).SigSlot.Sig.MsgIsZero()) && ((*z).SigSlot.L == 0)) && ((*z).Part.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func RevealMaxSize() (s int) {
+ s = 1 + 2 + 1 + 2 + merklesignature.SignatureMaxSize() + 2 + msgp.Uint64Size + 2 + basics.ParticipantMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *StateProof) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1409,6 +1458,26 @@ func (z *StateProof) MsgIsZero() bool {
return ((*z).SigCommit.MsgIsZero()) && ((*z).SignedWeight == 0) && ((*z).SigProofs.MsgIsZero()) && ((*z).PartProofs.MsgIsZero()) && ((*z).MerkleSignatureSaltVersion == 0) && (len((*z).Reveals) == 0) && (len((*z).PositionsToReveal) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func StateProofMaxSize() (s int) {
+ s = 1 + 2 + crypto.GenericDigestMaxSize() + 2 + msgp.Uint64Size + 2
+ // Using maxtotalbytes for: z.SigProofs
+ s += SigPartProofMaxSize
+ s += 2
+ // Using maxtotalbytes for: z.PartProofs
+ s += SigPartProofMaxSize
+ s += 2 + msgp.ByteSize + 2
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.Reveals
+ s += MaxReveals * (msgp.Uint64Size)
+ // Adding size of map values for z.Reveals
+ s += MaxReveals * (RevealMaxSize())
+ s += 3
+ // Calculating size of slice: z.PositionsToReveal
+ s += msgp.ArrayHeaderSize + ((MaxReveals) * (msgp.Uint64Size))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *sigslotCommit) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1537,3 +1606,9 @@ func (z *sigslotCommit) Msgsize() (s int) {
func (z *sigslotCommit) MsgIsZero() bool {
return ((*z).Sig.MsgIsZero()) && ((*z).L == 0)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func SigslotCommitMaxSize() (s int) {
+ s = 1 + 2 + merklesignature.SignatureMaxSize() + 2 + msgp.Uint64Size
+ return
+}
diff --git a/crypto/stateproof/prover.go b/crypto/stateproof/prover.go
index 64484ce35..f3a8a43bc 100644
--- a/crypto/stateproof/prover.go
+++ b/crypto/stateproof/prover.go
@@ -228,9 +228,9 @@ func (b *Prover) CreateProof() (*StateProof, error) {
revealsSequence := make([]uint64, nr)
for j := uint64(0); j < nr; j++ {
coin := coinHash.getNextCoin()
- pos, err := b.coinIndex(coin)
- if err != nil {
- return nil, err
+ pos, idxErr := b.coinIndex(coin)
+ if idxErr != nil {
+ return nil, idxErr
}
if pos >= uint64(len(b.Participants)) {
diff --git a/crypto/stateproof/prover_test.go b/crypto/stateproof/prover_test.go
index 6d374e059..b9680de3a 100644
--- a/crypto/stateproof/prover_test.go
+++ b/crypto/stateproof/prover_test.go
@@ -649,7 +649,7 @@ func BenchmarkBuildVerify(b *testing.B) {
data := testMessage("hello world").IntoStateProofMessageHash()
var parts []basics.Participant
- var partkeys []*merklesignature.Secrets
+ //var partkeys []*merklesignature.Secrets
var sigs []merklesignature.Signature
for i := 0; i < npart; i++ {
signer := generateTestSigner(0, stateProofIntervalForTests+1, stateProofIntervalForTests, a)
@@ -662,7 +662,7 @@ func BenchmarkBuildVerify(b *testing.B) {
sig, err := signerInRound.SignBytes(data[:])
require.NoError(b, err, "failed to create keys")
- partkeys = append(partkeys, signer)
+ //partkeys = append(partkeys, signer)
sigs = append(sigs, sig)
parts = append(parts, part)
}
diff --git a/crypto/stateproof/structs.go b/crypto/stateproof/structs.go
index 7ee599b7a..39e5e0f2f 100644
--- a/crypto/stateproof/structs.go
+++ b/crypto/stateproof/structs.go
@@ -72,8 +72,8 @@ type StateProof struct {
SigCommit crypto.GenericDigest `codec:"c"`
SignedWeight uint64 `codec:"w"`
- SigProofs merklearray.Proof `codec:"S"`
- PartProofs merklearray.Proof `codec:"P"`
+ SigProofs merklearray.Proof `codec:"S,maxtotalbytes=SigPartProofMaxSize"`
+ PartProofs merklearray.Proof `codec:"P,maxtotalbytes=SigPartProofMaxSize"`
MerkleSignatureSaltVersion byte `codec:"v"`
// Reveals is a sparse map from the position being revealed
// to the corresponding elements from the sigs and participants
@@ -82,6 +82,11 @@ type StateProof struct {
PositionsToReveal []uint64 `codec:"pr,allocbound=MaxReveals"`
}
+// SigPartProofMaxSize is the maximum valid size of SigProofs and PartProofs elements of the Stateproof struct in bytes.
+// It is equal to merklearray.ProofMaxSizeByElements(config.StateProofTopVoters/2)
+// See merklearray.Proof comment for explanation on the bound calculation
+const SigPartProofMaxSize = 35353
+
func (s StateProof) stringBuild() (b strings.Builder) {
b.WriteString("StateProof: {")
defer b.WriteRune('}')
diff --git a/crypto/stateproof/weights.go b/crypto/stateproof/weights.go
index aa849ec6a..43a22affd 100644
--- a/crypto/stateproof/weights.go
+++ b/crypto/stateproof/weights.go
@@ -115,7 +115,6 @@ func verifyWeights(signedWeight uint64, lnProvenWeight uint64, numOfReveals uint
// T and b are defined in the code as the constants ln2IntApproximation and precisionBits respectively,
// and P is set to lnProvenWeight argument.
//
-//
// more details can be found on the Algorand's spec
func numReveals(signedWeight uint64, lnProvenWeight uint64, strengthTarget uint64) (uint64, error) {
// in order to make the code more readable and reusable we will define the following expressions:
diff --git a/crypto/stateproof/weights_test.go b/crypto/stateproof/weights_test.go
index 5800c49f5..bf1fd2f64 100644
--- a/crypto/stateproof/weights_test.go
+++ b/crypto/stateproof/weights_test.go
@@ -21,6 +21,8 @@ import (
"math"
"testing"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/merklearray"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
)
@@ -184,6 +186,15 @@ func TestNumReveals(t *testing.T) {
}
}
+func TestSigPartProofMaxSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ // Ensures that the SigPartProofMaxSize constant used for maxtotalbytes for StateProof.(Sig|Part)Proof(s) is
+ // correct. It should be logically bound by the maximum number of StateProofTopVoters. It is scaled by 1/2
+ // see merkelarray.Proof comment for explanation of the size calculation.
+ require.Equal(t, SigPartProofMaxSize, merklearray.ProofMaxSizeByElements(config.StateProofTopVoters/2))
+}
+
func BenchmarkVerifyWeights(b *testing.B) {
billion := uint64(1000 * 1000 * 1000)
microalgo := uint64(1000 * 1000)
diff --git a/crypto/vrf.go b/crypto/vrf.go
index bfdf4ec40..948a84ddc 100644
--- a/crypto/vrf.go
+++ b/crypto/vrf.go
@@ -42,6 +42,11 @@ func init() {
// VRFVerifier is a deprecated name for VrfPubkey
type VRFVerifier = VrfPubkey
+// VRFVerifierMaxSize forwards to base implementation since it's expected by the msgp generated MaxSize functions
+func VRFVerifierMaxSize() int {
+ return VrfPubkeyMaxSize()
+}
+
// VRFProof is a deprecated name for VrfProof
type VRFProof = VrfProof
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index dfeaaa01b..5adf01711 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -2413,6 +2413,12 @@
"$ref": "#/definitions/ErrorResponse"
}
},
+ "408": {
+ "description": "Request Timeout",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
"500": {
"description": "Internal Error",
"schema": {
@@ -3102,6 +3108,29 @@
}
}
},
+ "AvmValue": {
+ "description": "Represents an AVM value.",
+ "type": "object",
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "type": {
+ "description": "value type. Value `1` refers to **bytes**, value `2` refers to **uint64**",
+ "type": "integer"
+ },
+ "bytes": {
+ "description": "bytes value.",
+ "type": "string",
+ "format": "byte"
+ },
+ "uint": {
+ "description": "uint value.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ }
+ }
+ },
"StateDelta": {
"description": "Application state delta.",
"type": "array",
@@ -3453,6 +3482,9 @@
"extra-opcode-budget": {
"description": "Applies extra opcode budget during simulation for each transaction group.",
"type": "integer"
+ },
+ "exec-trace-config": {
+ "$ref": "#/definitions/SimulateTraceConfig"
}
}
},
@@ -3475,6 +3507,24 @@
}
}
},
+ "SimulateTraceConfig": {
+ "description": "An object that configures simulation execution trace.",
+ "type": "object",
+ "properties": {
+ "enable": {
+ "description": "A boolean option for opting in execution trace features simulation endpoint.",
+ "type": "boolean"
+ },
+ "stack-change": {
+ "description": "A boolean option enabling returning stack changes together with execution trace during simulation.",
+ "type": "boolean"
+ },
+ "scratch-change": {
+ "description": "A boolean option enabling returning scratch slot changes together with execution trace during simulation.",
+ "type": "boolean"
+ }
+ }
+ },
"Box": {
"description": "Box name and its content.",
"type": "object",
@@ -3726,6 +3776,9 @@
"logic-sig-budget-consumed": {
"description": "Budget used during execution of a logic sig transaction.",
"type": "integer"
+ },
+ "exec-trace": {
+ "$ref": "#/definitions/SimulationTransactionExecTrace"
}
}
},
@@ -3830,6 +3883,95 @@
"type": "integer"
}
}
+ },
+ "ScratchChange": {
+ "description": "A write operation into a scratch slot.",
+ "type": "object",
+ "required": [
+ "slot",
+ "new-value"
+ ],
+ "properties": {
+ "slot": {
+ "description": "The scratch slot written.",
+ "type": "integer"
+ },
+ "new-value": {
+ "$ref": "#/definitions/AvmValue"
+ }
+ }
+ },
+ "SimulationOpcodeTraceUnit": {
+ "description": "The set of trace information and effect from evaluating a single opcode.",
+ "type": "object",
+ "required": [
+ "pc"
+ ],
+ "properties": {
+ "pc": {
+ "description": "The program counter of the current opcode being evaluated.",
+ "type": "integer"
+ },
+ "scratch-changes": {
+ "description": "The writes into scratch slots.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/ScratchChange"
+ }
+ },
+ "spawned-inners": {
+ "description": "The indexes of the traces for inner transactions spawned by this opcode, if any.",
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ },
+ "stack-pop-count": {
+ "description": "The number of deleted stack values by this opcode.",
+ "type": "integer"
+ },
+ "stack-additions": {
+ "description": "The values added by this opcode to the stack.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/AvmValue"
+ }
+ }
+ }
+ },
+ "SimulationTransactionExecTrace": {
+ "description": "The execution trace of calling an app or a logic sig, containing the inner app call trace in a recursive way.",
+ "type": "object",
+ "properties": {
+ "approval-program-trace": {
+ "description": "Program trace that contains a trace of opcode effects in an approval program.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/SimulationOpcodeTraceUnit"
+ }
+ },
+ "clear-state-program-trace": {
+ "description": "Program trace that contains a trace of opcode effects in a clear state program.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/SimulationOpcodeTraceUnit"
+ }
+ },
+ "logic-sig-trace": {
+ "description": "Program trace that contains a trace of opcode effects in a logic sig.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/SimulationOpcodeTraceUnit"
+ }
+ },
+ "inner-trace": {
+ "description": "An array of SimulationTransactionExecTrace representing the execution trace of any inner transactions executed.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/SimulationTransactionExecTrace"
+ }
+ }
+ }
}
},
"parameters": {
@@ -4459,6 +4601,9 @@
},
"eval-overrides": {
"$ref": "#/definitions/SimulationEvalOverrides"
+ },
+ "exec-trace-config": {
+ "$ref": "#/definitions/SimulateTraceConfig"
}
}
}
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index 2c65bdde1..f29797fe4 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -790,6 +790,9 @@
"eval-overrides": {
"$ref": "#/components/schemas/SimulationEvalOverrides"
},
+ "exec-trace-config": {
+ "$ref": "#/components/schemas/SimulateTraceConfig"
+ },
"last-round": {
"description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.",
"type": "integer"
@@ -1397,6 +1400,30 @@
],
"type": "object"
},
+ "AvmValue": {
+ "description": "Represents an AVM value.",
+ "properties": {
+ "bytes": {
+ "description": "bytes value.",
+ "format": "byte",
+ "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
+ "type": "string"
+ },
+ "type": {
+ "description": "value type. Value `1` refers to **bytes**, value `2` refers to **uint64**",
+ "type": "integer"
+ },
+ "uint": {
+ "description": "uint value.",
+ "type": "integer",
+ "x-algorand-format": "uint64"
+ }
+ },
+ "required": [
+ "type"
+ ],
+ "type": "object"
+ },
"Box": {
"description": "Box name and its content.",
"properties": {
@@ -1907,6 +1934,23 @@
],
"type": "object"
},
+ "ScratchChange": {
+ "description": "A write operation into a scratch slot.",
+ "properties": {
+ "new-value": {
+ "$ref": "#/components/schemas/AvmValue"
+ },
+ "slot": {
+ "description": "The scratch slot written.",
+ "type": "integer"
+ }
+ },
+ "required": [
+ "new-value",
+ "slot"
+ ],
+ "type": "object"
+ },
"SimulateRequest": {
"description": "Request type for simulation endpoint.",
"properties": {
@@ -1918,6 +1962,9 @@
"description": "Lifts limits on log opcode usage during simulation.",
"type": "boolean"
},
+ "exec-trace-config": {
+ "$ref": "#/components/schemas/SimulateTraceConfig"
+ },
"extra-opcode-budget": {
"description": "Applies extra opcode budget during simulation for each transaction group.",
"type": "integer"
@@ -1954,6 +2001,24 @@
],
"type": "object"
},
+ "SimulateTraceConfig": {
+ "description": "An object that configures simulation execution trace.",
+ "properties": {
+ "enable": {
+ "description": "A boolean option for opting in execution trace features simulation endpoint.",
+ "type": "boolean"
+ },
+ "scratch-change": {
+ "description": "A boolean option enabling returning scratch slot changes together with execution trace during simulation.",
+ "type": "boolean"
+ },
+ "stack-change": {
+ "description": "A boolean option enabling returning stack changes together with execution trace during simulation.",
+ "type": "boolean"
+ }
+ },
+ "type": "object"
+ },
"SimulateTransactionGroupResult": {
"description": "Simulation result for an atomic transaction group",
"properties": {
@@ -1996,6 +2061,9 @@
"description": "Budget used during execution of an app call transaction. This value includes budged used by inner app calls spawned by this transaction.",
"type": "integer"
},
+ "exec-trace": {
+ "$ref": "#/components/schemas/SimulationTransactionExecTrace"
+ },
"logic-sig-budget-consumed": {
"description": "Budget used during execution of a logic sig transaction.",
"type": "integer"
@@ -2031,6 +2099,78 @@
},
"type": "object"
},
+ "SimulationOpcodeTraceUnit": {
+ "description": "The set of trace information and effect from evaluating a single opcode.",
+ "properties": {
+ "pc": {
+ "description": "The program counter of the current opcode being evaluated.",
+ "type": "integer"
+ },
+ "scratch-changes": {
+ "description": "The writes into scratch slots.",
+ "items": {
+ "$ref": "#/components/schemas/ScratchChange"
+ },
+ "type": "array"
+ },
+ "spawned-inners": {
+ "description": "The indexes of the traces for inner transactions spawned by this opcode, if any.",
+ "items": {
+ "type": "integer"
+ },
+ "type": "array"
+ },
+ "stack-additions": {
+ "description": "The values added by this opcode to the stack.",
+ "items": {
+ "$ref": "#/components/schemas/AvmValue"
+ },
+ "type": "array"
+ },
+ "stack-pop-count": {
+ "description": "The number of deleted stack values by this opcode.",
+ "type": "integer"
+ }
+ },
+ "required": [
+ "pc"
+ ],
+ "type": "object"
+ },
+ "SimulationTransactionExecTrace": {
+ "description": "The execution trace of calling an app or a logic sig, containing the inner app call trace in a recursive way.",
+ "properties": {
+ "approval-program-trace": {
+ "description": "Program trace that contains a trace of opcode effects in an approval program.",
+ "items": {
+ "$ref": "#/components/schemas/SimulationOpcodeTraceUnit"
+ },
+ "type": "array"
+ },
+ "clear-state-program-trace": {
+ "description": "Program trace that contains a trace of opcode effects in a clear state program.",
+ "items": {
+ "$ref": "#/components/schemas/SimulationOpcodeTraceUnit"
+ },
+ "type": "array"
+ },
+ "inner-trace": {
+ "description": "An array of SimulationTransactionExecTrace representing the execution trace of any inner transactions executed.",
+ "items": {
+ "$ref": "#/components/schemas/SimulationTransactionExecTrace"
+ },
+ "type": "array"
+ },
+ "logic-sig-trace": {
+ "description": "Program trace that contains a trace of opcode effects in a logic sig.",
+ "items": {
+ "$ref": "#/components/schemas/SimulationOpcodeTraceUnit"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
"StateDelta": {
"description": "Application state delta.",
"items": {
@@ -3834,6 +3974,16 @@
},
"description": "Invalid API Token"
},
+ "408": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Request Timeout"
+ },
"500": {
"content": {
"application/json": {
@@ -6289,6 +6439,9 @@
"eval-overrides": {
"$ref": "#/components/schemas/SimulationEvalOverrides"
},
+ "exec-trace-config": {
+ "$ref": "#/components/schemas/SimulateTraceConfig"
+ },
"last-round": {
"description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.",
"type": "integer"
@@ -6319,6 +6472,9 @@
"eval-overrides": {
"$ref": "#/components/schemas/SimulationEvalOverrides"
},
+ "exec-trace-config": {
+ "$ref": "#/components/schemas/SimulateTraceConfig"
+ },
"last-round": {
"description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.",
"type": "integer"
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index 829e7c68e..0b3abecc0 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -754,6 +754,18 @@ func (client RestClient) GetLedgerStateDelta(round uint64) (response model.Ledge
return
}
+// GetLedgerStateDeltaForTransactionGroup retrieves the ledger state delta for the txn group specified by the id
+func (client RestClient) GetLedgerStateDeltaForTransactionGroup(id string) (response model.LedgerStateDeltaForTransactionGroupResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/deltas/txn/group/%s", id), nil)
+ return
+}
+
+// GetTransactionGroupLedgerStateDeltasForRound retrieves the ledger state deltas for the txn groups in the specified round
+func (client RestClient) GetTransactionGroupLedgerStateDeltasForRound(round uint64) (response model.TransactionGroupLedgerStateDeltasForRoundResponse, err error) {
+ err = client.get(&response, fmt.Sprintf("/v2/deltas/%d/txn/group", round), nil)
+ return
+}
+
// SetBlockTimestampOffset sets the offset in seconds to add to the block timestamp when in devmode
func (client RestClient) SetBlockTimestampOffset(offset uint64) (err error) {
err = client.post(nil, fmt.Sprintf("/v2/devmode/blocks/offset/%d", offset), nil, nil, true)
diff --git a/daemon/algod/api/server/lib/middlewares/auth.go b/daemon/algod/api/server/lib/middlewares/auth.go
index 363e2b2b0..d7cc336cf 100644
--- a/daemon/algod/api/server/lib/middlewares/auth.go
+++ b/daemon/algod/api/server/lib/middlewares/auth.go
@@ -72,9 +72,9 @@ func (auth *AuthMiddleware) handler(next echo.HandlerFunc) echo.HandlerFunc {
providedToken := []byte(ctx.Request().Header.Get(auth.header))
if len(providedToken) == 0 {
// Accept tokens provided in a bearer token format.
- authentication := strings.SplitN(ctx.Request().Header.Get("Authorization"), " ", 2)
- if len(authentication) == 2 && strings.EqualFold("Bearer", authentication[0]) {
- providedToken = []byte(authentication[1])
+ bearer, token, found := strings.Cut(ctx.Request().Header.Get("Authorization"), " ")
+ if found && strings.EqualFold("Bearer", bearer) {
+ providedToken = []byte(token)
}
}
diff --git a/daemon/algod/api/server/lib/middlewares/cors.go b/daemon/algod/api/server/lib/middlewares/cors.go
index 4bdf532e3..049184aad 100644
--- a/daemon/algod/api/server/lib/middlewares/cors.go
+++ b/daemon/algod/api/server/lib/middlewares/cors.go
@@ -28,6 +28,6 @@ func MakeCORS(tokenHeader string) echo.MiddlewareFunc {
return middleware.CORSWithConfig(middleware.CORSConfig{
AllowOrigins: []string{"*"},
AllowHeaders: []string{tokenHeader, "Content-Type"},
- AllowMethods: []string{http.MethodGet, http.MethodPost, http.MethodPut, http.MethodOptions},
+ AllowMethods: []string{http.MethodGet, http.MethodPost, http.MethodPut, http.MethodDelete, http.MethodOptions},
})
}
diff --git a/daemon/algod/api/server/v2/account.go b/daemon/algod/api/server/v2/account.go
index 9c25021b9..41a32b6c8 100644
--- a/daemon/algod/api/server/v2/account.go
+++ b/daemon/algod/api/server/v2/account.go
@@ -26,6 +26,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
+ "golang.org/x/exp/slices"
)
// AssetHolding converts between basics.AssetHolding and model.AssetHolding
@@ -132,9 +133,9 @@ func AccountDataToAccount(
AppsLocalState: &appsLocalState,
TotalAppsOptedIn: uint64(len(appsLocalState)),
AppsTotalSchema: &totalAppSchema,
- AppsTotalExtraPages: numOrNil(totalExtraPages),
- TotalBoxes: numOrNil(record.TotalBoxes),
- TotalBoxBytes: numOrNil(record.TotalBoxBytes),
+ AppsTotalExtraPages: omitEmpty(totalExtraPages),
+ TotalBoxes: omitEmpty(record.TotalBoxes),
+ TotalBoxBytes: omitEmpty(record.TotalBoxBytes),
MinBalance: minBalance.Raw,
}, nil
}
@@ -429,7 +430,7 @@ func AppParamsToApplication(creator string, appIdx basics.AppIndex, appParams *b
Creator: creator,
ApprovalProgram: appParams.ApprovalProgram,
ClearStateProgram: appParams.ClearStateProgram,
- ExtraProgramPages: numOrNil(extraProgramPages),
+ ExtraProgramPages: omitEmpty(extraProgramPages),
GlobalState: globalState,
LocalStateSchema: &model.ApplicationStateSchema{
NumByteSlice: appParams.LocalStateSchema.NumByteSlice,
@@ -465,11 +466,11 @@ func AssetParamsToAsset(creator string, idx basics.AssetIndex, params *basics.As
Total: params.Total,
Decimals: uint64(params.Decimals),
DefaultFrozen: &frozen,
- Name: strOrNil(printableUTF8OrEmpty(params.AssetName)),
+ Name: omitEmpty(printableUTF8OrEmpty(params.AssetName)),
NameB64: byteOrNil([]byte(params.AssetName)),
- UnitName: strOrNil(printableUTF8OrEmpty(params.UnitName)),
+ UnitName: omitEmpty(printableUTF8OrEmpty(params.UnitName)),
UnitNameB64: byteOrNil([]byte(params.UnitName)),
- Url: strOrNil(printableUTF8OrEmpty(params.URL)),
+ Url: omitEmpty(printableUTF8OrEmpty(params.URL)),
UrlB64: byteOrNil([]byte(params.URL)),
Clawback: addrOrNil(params.Clawback),
Freeze: addrOrNil(params.Freeze),
@@ -477,8 +478,7 @@ func AssetParamsToAsset(creator string, idx basics.AssetIndex, params *basics.As
Reserve: addrOrNil(params.Reserve),
}
if params.MetadataHash != ([32]byte{}) {
- metadataHash := make([]byte, len(params.MetadataHash))
- copy(metadataHash, params.MetadataHash[:])
+ metadataHash := slices.Clone(params.MetadataHash[:])
assetParams.MetadataHash = &metadataHash
}
diff --git a/daemon/algod/api/server/v2/generated/data/routes.go b/daemon/algod/api/server/v2/generated/data/routes.go
index 7d2982705..af7c75e2d 100644
--- a/daemon/algod/api/server/v2/generated/data/routes.go
+++ b/daemon/algod/api/server/v2/generated/data/routes.go
@@ -114,180 +114,189 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9+5PbNtLgv4LS91X5ceLIr2TXU5X6bmIn2bk4icszyd63ti+ByJaEHRLgAqBGis//",
- "+xUaAAmSIEXNTOzdq/3JHhGPRqPR6Be6P8xSUZSCA9dqdvphVlJJC9Ag8S+apqLiOmGZ+SsDlUpWaib4",
- "7NR/I0pLxtez+YyZX0uqN7P5jNMCmjam/3wm4R8Vk5DNTrWsYD5T6QYKagbW+9K0rkfaJWuRuCHO7BDn",
- "L2cfRz7QLJOgVB/Kn3i+J4yneZUB0ZJyRVPzSZFrpjdEb5girjNhnAgORKyI3rQakxWDPFMnfpH/qEDu",
- "g1W6yYeX9LEBMZEihz6cL0SxZBw8VFADVW8I0YJksMJGG6qJmcHA6htqQRRQmW7ISsgDoFogQniBV8Xs",
- "9O1MAc9A4m6lwLb435UE+B0STeUa9Oz9PLa4lQaZaFZElnbusC9BVblWBNviGtdsC5yYXifkh0ppsgRC",
- "OXnz7Qvy9OnT52YhBdUaMkdkg6tqZg/XZLvPTmcZ1eA/92mN5mshKc+Suv2bb1/g/BdugVNbUaUgfljO",
- "zBdy/nJoAb5jhIQY17DGfWhRv+kRORTNz0tYCQkT98Q2vtNNCef/rLuSUp1uSsG4juwLwa/Efo7ysKD7",
- "GA+rAWi1Lw2mpBn07aPk+fsPj+ePH338j7dnyd/cn188/Thx+S/qcQ9gINowraQEnu6TtQSKp2VDeR8f",
- "bxw9qI2o8oxs6BY3nxbI6l1fYvpa1rmleWXohKVSnOVroQh1ZJTBila5Jn5iUvHcsCkzmqN2whQppdiy",
- "DLK54b7XG5ZuSEqVHQLbkWuW54YGKwXZEK3FVzdymD6GKDFw3QgfuKB/XmQ06zqACdghN0jSXChItDhw",
- "Pfkbh/KMhBdKc1ep4y4rcrkBgpObD/ayRdxxQ9N5vica9zUjVBFK/NU0J2xF9qIi17g5ObvC/m41BmsF",
- "MUjDzWndo+bwDqGvh4wI8pZC5EA5Is+fuz7K+IqtKwmKXG9Ab9ydJ0GVgisgYvl3SLXZ9v918dOPREjy",
- "AyhF1/CaplcEeCoyyE7I+YpwoQPScLSEODQ9h9bh4Ipd8n9XwtBEodYlTa/iN3rOChZZ1Q90x4qqILwq",
- "liDNlvorRAsiQVeSDwFkRzxAigXd9Se9lBVPcf+baVuynKE2psqc7hFhBd199WjuwFGE5jkpgWeMr4ne",
- "8UE5zsx9GLxEiopnE8QcbfY0uFhVCSlbMchIPcoIJG6aQ/Awfhw8jfAVgOMHGQSnnuUAOBx2EZoxp9t8",
- "ISVdQ0AyJ+Rnx9zwqxZXwGtCJ8s9fiolbJmoVN1pAEacelwC50JDUkpYsQiNXTh0GAZj2zgOXDgZKBVc",
- "U8YhM8wZgRYaLLMahCmYcFzf6d/iS6rgy2dDd3zzdeLur0R310d3fNJuY6PEHsnI1Wm+ugMbl6xa/Sfo",
- "h+Hciq0T+3NvI9n60tw2K5bjTfR3s38eDZVCJtBChL+bFFtzqisJp+/4Q/MXSciFpjyjMjO/FPanH6pc",
- "swu2Nj/l9qdXYs3SC7YeQGYNa1Thwm6F/ceMF2fHehfVK14JcVWV4YLSluK63JPzl0ObbMc8ljDPam03",
- "VDwud14ZObaH3tUbOQDkIO5KahpewV6CgZamK/xnt0J6oiv5u/mnLHPTW5erGGoNHbsrGc0HzqxwVpY5",
- "S6lB4hv32Xw1TACsIkGbFgu8UE8/BCCWUpQgNbOD0rJMcpHSPFGaahzpPyWsZqez/1g09peF7a4WweSv",
- "TK8L7GREVisGJbQsjxjjtRF91AizMAwaPyGbsGwPhSbG7SYaUmKGBeewpVyfNCpLix/UB/itm6nBt5V2",
- "LL47KtggwoltuARlJWDb8J4iAeoJopUgWlEgXediWf9w/6wsGwzi97OytPhA6REYCmawY0qrB7h82pyk",
- "cJ7zlyfku3BsFMUFz/fmcrCihrkbVu7WcrdYbVtya2hGvKcIbqeQJ2ZrPBqMmH8XFIdqxUbkRuo5SCum",
- "8V9c25DMzO+TOv9rkFiI22HiQkXLYc7qOPhLoNzc71BOn3CcueeEnHX73oxszChxgrkRrYzupx13BI81",
- "Cq8lLS2A7ou9SxlHJc02srDekptOZHRRmIMzHNAaQnXjs3bwPEQhQVLowPB1LtKrv1C1uYMzv/Rj9Y8f",
- "TkM2QDOQZEPV5mQWkzLC49WMNuWImYao4JNlMNVJvcS7Wt6BpWVU02BpDt64WGJRj/2Q6YGM6C4/4X9o",
- "Tsxnc7YN67fDnpBLZGDKHmfnZMiMtm8VBDuTaYBWCEEKq+ATo3UfBeWLZvL4Pk3ao2+sTcHtkFsE7pDY",
- "3fkx+FrsYjB8LXa9IyB2oO6CPsw4KEZqKNQE+F46yATuv0MflZLu+0jGsacg2SzQiK4KTwMPb3wzS2Oc",
- "PVsKeTPu02ErnDQmZ0LNqAHznXeQhE2rMnGkGDFb2QadgRov3zjT6A4fw1gLCxea/gFYUGbUu8BCe6C7",
- "xoIoSpbDHZD+Jsr0l1TB0yfk4i9nXzx+8uuTL740JFlKsZa0IMu9BkXuO92MKL3P4UF/ZagdVbmOj/7l",
- "M2+obI8bG0eJSqZQ0LI/lDWAWhHINiOmXR9rbTTjqmsApxzOSzCc3KKdWNu+Ae0lU0bCKpZ3shlDCMua",
- "WTLiIMngIDEdu7xmmn24RLmX1V2osiClkBH7Gh4xLVKRJ1uQiomIN+W1a0FcCy/elt3fLbTkmipi5kbT",
- "b8VRoIhQlt7x6XzfDn254w1uRjm/XW9kdW7eKfvSRr63JCpSgkz0jpMMltW6pQmtpCgIJRl2xDv6O9Ao",
- "ClyyAi40LcqfVqu7URUFDhRR2VgBysxEbAsj1ytIBbeREAe0MzfqFPR0EeNNdHoYAIeRiz1P0c54F8d2",
- "WHEtGEenh9rzNNBiDYw5ZOsWWd5eWx1Ch53qnoqAY9DxCj+joeMl5Jp+K+RlYwn8ToqqvHMhrzvn1OVQ",
- "txhnSslMX69DM77O29E3awP7SWyNn2VBL/zxdWtA6JEiX7H1RgdqxWspxOruYYzNEgMUP1ilLDd9+qrZ",
- "jyIzzERX6g5EsGawhsMZug35Gl2KShNKuMgAN79SceFsIF4DHcXo39ahvKc3Vs9agqGulFZmtVVJ0Hvb",
- "uy+ajglN7QlNEDVqwHdVOx1tKzudjQXIJdBsT5YAnIilcxA51xUukqLrWXvxxomGEX7RgquUIgWlIEuc",
- "YeogaL6dvTr0CJ4QcAS4noUoQVZU3hrYq+1BOK9gn2CghCL3v/9FPfgM8GqhaX4Asdgmht5azXdewD7U",
- "06YfI7ju5CHZUQnE3ytEC5Rmc9AwhMKjcDK4f12Iert4e7RsQaI/7g+leD/J7QioBvUPpvfbQluVA+F/",
- "Tr01Ep7ZME658IJVbLCcKp0cYsumUUsHNysIOGGME+PAA4LXK6q09SEznqHpy14nOI8VwswUwwAPqiFm",
- "5F+8BtIfOzX3IFeVqtURVZWlkBqy2Bo47Ebm+hF29VxiFYxd6zxakErBoZGHsBSM75BlV2IRRHXtanFB",
- "Fv3FoUPC3PP7KCpbQDSIGAPkwrcKsBuGQA0AwlSDaEs4THUop467ms+UFmVpuIVOKl73G0LThW19pn9u",
- "2vaJi+rm3s4EKIy8cu0d5NcWszb4bUMVcXCQgl4Z2QPNINbZ3YfZHMZEMZ5CMkb5qOKZVuEROHhIq3It",
- "aQZJBjnd9wf92X4m9vPYALjjjborNCQ2iim+6Q0l+6CRkaEFjqdiwiPBLyQ1R9CoAg2BuN4HRs4Ax44x",
- "J0dH9+qhcK7oFvnxcNl2qyMj4m24FdrsuKMHBNlx9CkAD+ChHvrmqMDOSaN7dqf4b1BuglqOOH6SPaih",
- "JTTjH7WAARuqCxAPzkuHvXc4cJRtDrKxA3xk6MgOGHRfU6lZykrUdb6H/Z2rft0Jom5GkoGmLIeMBB+s",
- "GliG/YmNv+mOeTNVcJLtrQ9+z/gWWU7OFIo8beCvYI8692sb2BmYOu5Cl42Mau4nygkC6sPFjAgeNoEd",
- "TXW+N4Ka3sCeXIMEoqplwbS2AdttVVeLMgkHiPo1RmZ0TjwbFOl3YIpX8QKHCpbX34r5zOoE4/BddhSD",
- "FjqcLlAKkU+wkPWQEYVgUrwHKYXZdeZix330sKekFpCOaaMHt77+76kWmnEF5L9FRVLKUeWqNNQyjZAo",
- "KKAAaWYwIlg9p4vsaDAEORRgNUn88vBhd+EPH7o9Z4qs4No/uDANu+h4+BDtOK+F0q3DdQf2UHPcziPX",
- "Bzp8zMXntJAuTzkcWeBGnrKTrzuD114ic6aUcoRrln9rBtA5mbspaw9pZFpUBY47yZcTDB1bN+77BSuq",
- "nOq78FrBluaJ2IKULIODnNxNzAT/Zkvzn+puB3S6JgqMFQVkjGrI96SUkIKNzjeimqrHPiE2bi/dUL5G",
- "CV2Kau0Cx+w4yGErZW0hsuK9IaJSjN7xBK3KMY7rgoX9Aw0jvwA1OlTXJG01hmtaz+fe5Ey5Cv3ORUz0",
- "Ua/UfDaoYhqkbhsV0yKn/cpkAvdtCVgBfpqJJ/ouEHVG2OjjK9wWQ71mc/8YG3kzdAzK/sRBKFvzcSia",
- "zei3+f4OpAw7EJFQSlB4J4R2IWW/ilX4osxdGmqvNBR907nt+uvA8XszqKAJnjMOSSE47KOPqBmHH/Bj",
- "9DjhvTTQGSWEob5dob8Ffwes9jxTqPG2+MXd7p7QrotIfSvkXfkg7YCT5ekJLr+D/m035U0dkzTPI748",
- "996kywDUvH7fziShSomUoZB0nqm5PWjO/ecep7TR/7qOor2Ds9cdt+O0Cp8yolEW8pJQkuYMTbaCKy2r",
- "VL/jFI1CwVIj0UZe+x02E77wTeJ2yYjZ0A31jlOMNKtNRdEIiRVE7CLfAnhroarWa1C6o1ysAN5x14px",
- "UnGmca7CHJfEnpcSJIb8nNiWBd2TlaEJLcjvIAVZVrotbuNzKqVZnjsPmpmGiNU7TjXJgSpNfmD8cofD",
- "eS+7P7Ic9LWQVzUW4rf7GjgoppJ4VNR39isGrLrlb1zwKj5/t5+tz8WM37y52qPNqHnS/X/u/9fp27Pk",
- "bzT5/VHy/H8s3n949vHBw96PTz5+9dX/bf/09ONXD/7rP2M75WGPPfZxkJ+/dKro+UvUNxqnSw/2T2Zw",
- "LxhPokQWhk90aIvcx4etjoAetK1RegPvuN5xQ0hbmrPM8JabkEP3humdRXs6OlTT2oiO9cmv9Ugp/hZc",
- "hkSYTIc13liK6gcSxp/VoRfQvZTD87KquN1KL33bVyM+oEus5vXTSZtV5ZTgu7oN9dGI7s8nX3w5mzfv",
- "4ervs/nMfX0foWSW7WKvHjPYxZQzd0DwYNxTpKR7BTrOPRD2aOyaDaYIhy3AaPVqw8pPzymUZss4h/Ox",
- "+M7Is+Pn3AbJm/ODPsW9c1WI1aeHW0uADEq9iWVbaAlq2KrZTYBOnEcpxRb4nLATOOkaWTKjL7oouhzo",
- "Cl/9o/YppmhD9TmwhOapIsB6uJBJlowY/aDI47j1x/nMXf7qztUhN3AMru6ctQPR/60FuffdN5dk4Rim",
- "umcf4NqhgyeTEVXavQpqRQAZbmZzzFgh7x1/x1/CinFmvp++4xnVdLGkiqVqUSmQX9Oc8hRO1oKc+odG",
- "L6mm73hP0hpMAxU88SJltcxZSq5ChaQhT5vaoz/Cu3dvab4W79697wVD9NUHN1WUv9gJEiMIi0onLjFB",
- "IuGaypizSdUP03Fkm3lkbFYrZIvKWiR94gM3fpzn0bJU3Qeq/eWXZW6WH5Chcs8vzZYRpYX0sogRUCw0",
- "uL8/CncxSHrt7SqVAkV+K2j5lnH9niTvqkePngJpvdj8zV35hib3JUy2rgw+oO0aVXDhVq2EnZY0Kek6",
- "5tN69+6tBlri7qO8XKCNI88Jdmu9FPWR8DhUswCPj+ENsHAc/eoNF3dhe/kkVPEl4CfcQmxjxI3G037T",
- "/Qrejt54uzrvT3u7VOlNYs52dFXKkLjfmTo3zdoIWT78QbE1aqsujc8SSLqB9MrlV4Gi1Pt5q7uPsHGC",
- "pmcdTNnMO/blF+Z+QI/AEkhVZtSJ4pTvu4/wFWjt43jfwBXsL0WTOuKYV/ftR+Bq6KAipQbSpSHW8Ni6",
- "Mbqb78K4ULEvS/+WGh/VebI4renC9xk+yFbkvYNDHCOK1iPlIURQGUGEJf4BFNxgoWa8W5F+bHlGy1ja",
- "my+ShcfzfuKaNMqTi7gKV4NWd/u9AEzjJa4VWVIjtwuXgco+dA64WKXoGgYk5NApM/E5ccuRg4Mcuvei",
- "N51YdS+03n0TBdk2Tsyao5QC5oshFVRmOnF2fibr93OeCUws6RC2zFFMqgMSLdOhsuUcs5nyhkCLEzBI",
- "3ggcHow2RkLJZkOVT46FOcT8WZ4kA/yBD/fH0rWcByFiQaKwOhmL57ndc9rTLl3SFp+pxadnCVXLCalW",
- "jISPUemx7RAcBaAMcljbhdvGnlCaJALNBhk4flqtcsaBJLFos8AMGlwzbg4w8vFDQqwFnkweIUbGAdjo",
- "z8aByY8iPJt8fQyQ3CVBoH5s9IQHf0P8vZaNvzYijygNC2cDXq3UcwDqQhTr+6sTKIvDEMbnxLC5Lc0N",
- "m3MaXzNIL2sIiq2dHCEuouLBkDg74gCxF8tRa7JX0U1WE8pMHui4QDcC8VLsEvtgMyrxLndLQ+/RkHR8",
- "Pho7mDY/yz1FlmKHUTp4tdgQ6AOwDMPhwQg0/B1TSK/Yb+g2t8CMTTsuTcWoUCHJOHNeTS5D4sSUqQck",
- "mCFyuR+kXLkRAB1jR5O/2Cm/B5XUtnjSv8ybW23epBLzr31ix3/oCEV3aQB/fStMnSTldVdiidop2sEm",
- "7fwwgQgZI3rDJvpOmr4rSEEOqBQkLSEquYp5To1uA3jjXPhugfECs9BQvn8QRDBJWDOloTGi+ziJz2Ge",
- "pJj8TojV8Op0KVdmfW+EqK8p60bEjq1lfvIVYAjwikmlE/RARJdgGn2rUKn+1jSNy0rtGCmbKpZlcd6A",
- "017BPslYXsXp1c37/Usz7Y81S1TVEvkt4zZgZYmpjaORkyNT2+Da0QW/sgt+Re9svdNOg2lqJpaGXNpz",
- "/Iuciw7nHWMHEQKMEUd/1wZROsIggxevfe4YyE2Bj/9kzPraO0yZH/tg1I5/dzt0R9mRomsJDAajq2Do",
- "JjJiCdNBZuD+U9SBM0DLkmW7ji3UjjqoMdOjDB4+n1oHC7i7brADGAjsnrHXMBJUO3VeI+DbHM+tzDUn",
- "kzBz2U5wFzKEcCqmfIWCPqLq13KHcHUJNP8e9r+Ytric2cf57Ham0xiu3YgHcP263t4ontE1b01pLU/I",
- "kSinZSnFluaJMzAPkaYUW0ea2Nzboz8xq4ubMS+/OXv12oH/cT5Lc6AyqUWFwVVhu/JfZlU2S9/AAfEZ",
- "0I3O52V2K0oGm1+nFguN0tcbcKmkA2m0l/OycTgER9EZqVfxCKGDJmfnG7FLHPGRQFm7SBrznfWQtL0i",
- "dEtZ7u1mHtqBaB5c3LTEqVGuEA5wa+9K4CRL7pTd9E53/HQ01HWAJ4VzjSS7Lmw+d0UE77rQMeZ5Xzqv",
- "e0ExY6W1ivSZE68KtCQkKmdp3MbKl8oQB7e+M9OYYOMBYdSMWLEBVyyvWDCWaTYlJ00HyGCOKDJVNC1O",
- "g7ulcLV6Ks7+UQFhGXBtPkk8lZ2DiulNnLW9f50a2aE/lxvYWuib4W8jY4TZWrs3HgIxLmCEnroeuC9r",
- "ldkvtLZImR8Cl8QRDv9wxt6VOOKsd/ThqNkGL27aHrewtE6f/xnCsDnWD9f18cqrSxs7MEe0Tg9TyUqK",
- "3yGu56F6HHlo5PPTMoxy+R3Chw5hdYoWi6mtO025oWb2we0ekm5CK1Q7SGGA6nHnA7ccJsr0FmrK7Vbb",
- "shmtWLc4wYRRpQs7fkMwDuZeJG5Or5c0lkXUCBkGprPGAdyypWtBfGePe1W/trCzk8CXXLdl9hF5CbJ5",
- "A9hPSHNDgcFOO1lUaCQDpNpQJphb/1+uRGSYil9TbquvmH72KLneCqzxy/S6FhJTQKi42T+DlBU0j0sO",
- "Wdo38WZszWxhkUpBULnCDWSLNlkqctU/6jdEDjXnK/JoHpTPcbuRsS1TbJkDtnhsWyypQk5eG6LqLmZ5",
- "wPVGYfMnE5pvKp5JyPRGWcQqQWqhDtWb2nm1BH0NwMkjbPf4ObmPbjvFtvDAYNHdz7PTx8/R6Gr/eBS7",
- "AFxhmDFukiE7+atjJ3E6Rr+lHcMwbjfqSfS1vK0MN8y4Rk6T7TrlLGFLx+sOn6WCcrqGeKRIcQAm2xd3",
- "Ew1pHbzwzJY1UlqKPWE6Pj9oavjTQPS5YX8WDJKKomC6cM4dJQpDT01ZCjupH87WSHIZhT1c/iP6SEvv",
- "IuookZ/WaGrvt9iq0ZP9Iy2gjdY5oTbvR86a6AWf55yc+7RCmGK5zqxscWPmMktHMQeDGVaklIxrVCwq",
- "vUr+TNINlTQ17O9kCNxk+eWzSFrpdnpTfhzgnxzvEhTIbRz1coDsvQzh+pL7XPCkMBwle9C89ghO5aAz",
- "N+62G/Idjg89VSgzoySD5Fa1yI0GnPpWhMdHBrwlKdbrOYoej17ZJ6fMSsbJg1Zmh35+88pJGYWQsVyB",
- "zXF3EocELRlsMXYvvklmzFvuhcwn7cJtoP+8ngcvcgZimT/LMUXgaxHRTn2q89qS7mLVI9aBoWNqPhgy",
- "WLqh5qSdVvrT89G7iYKKe7q8Ybvv2DJfPB7wjy4iPjO54AY2vny7kgFCCdLqR0kmq78HPnZKvha7qYTT",
- "OYWeeP4JUBRFScXy7Jfm5WenaoGkPN1EfWZL0/HXpr5avTh7B0bT/m0o55BHh7Py5q9eLo1Izn8XU+cp",
- "GJ/YtltIwS63s7gG8DaYHig/oUEv07mZIMRq+1FdHbSdr0VGcJ4mx1xzXPsFOII06f+oQOnYAyX8YAPH",
- "0DZq2IHN0k2AZ6iRnpDvbAnlDZBWAiHUBH2miPar6arMBc3mmMHi8puzV8TOavvYKkE2S/gaFaH2Kjo2",
- "sSB95rQQZF/wJ/48Yvo44/HaZtVKJ3VS79gDVNOiSTvOOn4CVJFC7JyQl0ExVPtW1Qxh6GHFZGG0uno0",
- "Kx8hTZj/aE3TDap9LdY6TPLT09t7qlRBScm6NFSdUxLPnYHbZbi3Ce7nRBjd/JopWzkXttB+81o/AHdm",
- "B/8Gtr08WXFuKeXkiFuuziB5LNo9cPaK9K6EKGQdxB8p9NvqEMdm+7/AXtEUV93SAb1akvYFZV3yx1dE",
- "TykXnKWYYCp2RbsSu1P8bBNycXUNuf6IuxMaOVzRggV1KJ7D4mAJA88IHeL6hv7gq9lUSx32T421XDdU",
- "kzVo5TgbZHNfd8PZGhlX4HKEYkHmgE8K2fJdIoeMusOT2m1yJBnh05sB5fFb8+1HZ1rAmPQrxlGJcGhz",
- "gp+1BmIFUG00D6bJWoBy62m/P1ZvTZ8TfIqbwe79ia8YimNY159ZtvVz94c6815v52U2bV+Yti5BUv1z",
- "K8rZTnpWlm7S4aosUXlA7/gggiPey8S7jwLk1uOHo42Q22i4Ct6nhtBgi85uKPEe7hFGXaGkU/3KCK2W",
- "orAFsWFi0SwJjEfAeMU4NPVsIxdEGr0ScGPwvA70U6mk2oqAk3jaJdAcPdwxhqa0c2/cdqhueiiDElyj",
- "n2N4G5viKgOMo27QCG6U7+syuoa6A2HiBdbvdojsl0pBqcoJURm+WugUT4kxDsO4fXmm9gXQPwZ9mch2",
- "15Lak3PMTTT0EHVZZWvQCc2yWMrWr/Erwa8kq1BygB2kVZ3asyxJinlX2olo+tTmJkoFV1UxMpdvcMvp",
- "gmpEEWoIKyL5HcaHLss9/hvLazm8My7Q4+hQQx/VkR2XfakfOhmTeg1NJ4qtk+mYwDvl9uhopr4ZoTf9",
- "75TSc7FuA/KJ00+Mcblwj2L87RtzcYTZGXrJWu3VUidPwMA+4WtIotpYP/ttcyW8ynrZW9GhVNeoGzdA",
- "DFebm+PlNxDeGyTdoPZ+tR7KoSDfdDAmnWr3Ok5TMsqCBl8c2Qgh+7YIoYhbZ4eigmxQkPnc6z1NMuzJ",
- "2Tqe+DBAqA836wP0vY9lJSVlzv3eMIs+Zl3Ue/8dwpR42GaDu4twseSDFrvvt0Nx3z4ZG37vVqO6Avdk",
- "vpSwZaLyjm0f+eRVQvtrq7ZTHXkfXX/f8IpTfV5z6KDx9tJVBbDLdDr597/YODkCXMv9P4Ept7fpvTpX",
- "fWnXmqeaJqROKD0pwXTrVpySqDCWE8/Jhq1KWwfqhPXI6uUUcaBf92s+O8+OujBjeRVndpTYsYtX8RpO",
- "O9WkmsIjVgrFmrzusfJeE0MML7FCV5A2qz+Wj+/ZQqoxmX8TtyABjkmiZSYLCob+O/3UgDpdR2K6rFNj",
- "qab6GfwP3PG912DBi0ab/fxkemKlszo6Dfk0ZkNeA3c1O9vvPCZHm69WkGq2PfD67q8b4MHLrrm3y9ja",
- "28FjPFZHL2PyluOtjg1AY4/jRuEJkijeGpyhtzdXsL+nSIsaounY5/6qvUneDsQAcofEkIhQsegPa0h2",
- "DnmmaspALPhoK9sdmgxog5WcgrekN5zLk6S5OJr3pSNTxkvJTJrLdD3q1TUG4g490OtXohjWP15i4Q9V",
- "V1n0eT9CLZ2c97MjXru8IfhWsvad+AwioPxv/mG0nSVnVxDWmkJP1TWVmW8RNb14q04ych/1XtX5Kgpd",
- "oFf1zKyJje2/o4rk28II6DQXRoxIhsLI2+GodSzHPWWDbmz6dwy0NXCtQLqafCj/5kJBooWPpR2DYwwV",
- "NrLoRkhQgzkuLXCDmWfeNKl1MNcvxUwz1AUUhQskEgpqoJNBApzhOceQ/cJ+9w+HfK7Xgxamml4PFx3w",
- "UdFM9ZAYUv2KuNvy8IOkmxibGOe27rOKZcPhINvekFKKrErtBR0ejNogNznX1Agridpp0v4qOzpC8Krz",
- "CvYLqwT5ag1+B0OgreRkQQ+yKHQ2+U7NbyoG9/pOwPuclqv5rBQiTwacHef9FD5dir9i6RVkxNwUPnpw",
- "oPINuY829tqbfb3Z+5Q1ZQkcsgcnhJxxG6/tHdvtHNKdyfk9PTb/DmfNKptVyxnVTt7xeOAr5ruSt+Rm",
- "fphxHqbAsLpbTmUHOZAgZjeQPkjS60gdqJOpWnnf1dytzdMQlYUiJpM0ZWcOxMnUITJN5Y8mTKYvHeS5",
- "uE6QipI6/1dM5zDt2kzSZzxtuhlsLyGIt6HKXaB7sqEZSYWUkIY94k8cLFCFkJDkAsNvYp7BlTbyUIFx",
- "zZzkYk1EadRcm0bP+1CiZWmCuewzW9szsY6agUQGoNyzWjeNbdyfZ6R6zfGVcS43EXsLItpj+ejyN45Q",
- "jq5aEYA5gUAP25rOYtV92uvq1ocaqtamRcHSOLr/taJMBmNDDtQuiqyvJkdXWsm/ChzAVdRlO+4htXXo",
- "llP9pHXO5InHIgBg2HPagmGS//RYMFZY1zGhESSf11LrvFV2l3XOvs9nZ2k8pVZr3QAxY1cS3Cs1W4Cu",
- "UzmnpHrjbzHTvK9bGj0FFD4hs+U/qLKWEG+RcdXvuuKBKJMcttByKLunc1WaglJsC2HlPNuZZAAl2ie7",
- "UnPMUxpyuY4o5daeBL62KdiNylYWsXanyAHBKSrm7Xhij4maepQMRFuWVbSFP3WLWmRDZcgibNjDOpFT",
- "HM0k4osbYxEHYxuQ5qPnksdDG8KXm7VRBGfLauOpJcLmZKuSXvNhJSJid6r97bdfB8HBiOq8pB688mW9",
- "KzdVIAcpY4wwevUDozKHAl//NUx64sUt1zciY1lTF1ORAZhqzjNG70ETHRY0K+ieZGy1AmmN+UpTnlGZ",
- "hc0ZJylITZnRbPbq5mKtgVZWMD8o2RruioN6BhOTcdEuZQHJ905luIXUiZ6biMRpr1othkok9nYl/pyA",
- "7ox0jXFVA0TgHkKjbG0PmOAoIJGCXsGR8yj2O4xPg+lJnO1PC5x1yhQxX+sNc6tNYt39MITI7RYUQxz3",
- "DIWpF5s3XdJGs6Al2V+QXRr/obk4p5Vl9B0OgBc6DIPCjN5248D5zI+jfqiREizl/RAltJZ/yAfpFthI",
- "GsEWOUagNdhEuDagvr0vgYNZvaj9tkM1RLvuXcyzKLgt8tdzC1veZKv2BYRjzoLc0vzTu3YxAecZ4gOy",
- "N8PG4NA3GCLZolLd7GXCKzpp7sAPeHdT89foiv4rmD2KaqVuKCfC1GK9D+bBm4Xm1nCx8iW8tsDJNY5p",
- "49gef0mW7uV2KSFlqisaXfvqGrUrDItNudcgO33A93Zonb8IfQsyXnlNg/zYZOpHHX/NGwibI/qZmcrA",
- "yY1SeYz6emQRwV+MR4Up1A5cF1etADdb+aTzckNIuONAtyBk/chAt35yuKnLs8Fc5tKpFPTXOfm2buE2",
- "clE3a5sapdlH7lg69ynBlfEqDaY7RndahGCJE4Kgkt8e/0YkrLCGoSAPH+IEDx/OXdPfnrQ/m+P88GFU",
- "OvtkcZ0WR24MN2+MYn4ZeulnX7MNPCrt7EfF8uwQYbSeCDdVQPER7K8uEcFnqUP6q4016R9VVwvuFgFy",
- "FjGRtbYmD6YKHv9OePfrukVe+aIfJ60k03vMj+jtB+zXaATqd3U0k4uGq/VDd/dpcQV1hs0m9qlS/nb9",
- "TtAc7yOrtnJzC4n8hHyzo0WZgzsoX91b/gme/vlZ9ujp4z8t//zoi0cpPPvi+aNH9Pkz+vj508fw5M9f",
- "PHsEj1dfPl8+yZ48e7J89uTZl188T58+e7x89uXzP90zfMiAbAGd+Ww8s/+NxXqTs9fnyaUBtsEJLdn3",
- "sLd1AQ0Z+4qDNMWTCAVl+ezU//Q//Qk7SUXRDO9/nblkH7ON1qU6XSyur69Pwi6LNQY7JFpU6Wbh5+mV",
- "JDx7fV57iawVCHfUvpP11j1PCmf47c03F5fk7PX5SVCv/nT26OTRyWMsb14CpyWbnc6e4k94eja47wtH",
- "bLPTDx/ns8UGaI6xgeaPArRkqf8kgWZ79391TddrkCeuDKP5aftk4cWKxQcX9PFx7NsirGiy+NCKjckO",
- "9MSKB4sPPpHfeOtWpjwXExR0mAjFWLPFEvODTG0KKmg8vBRUNtTiA4rLg78vXEKD+EdUW+x5WPgAsnjL",
- "FpY+6J2BtdMjpTrdVOXiA/4H6TMAyz4fWugdX6DtY/GhtRr3ubea9u9N97DFthAZeIDFamUTk459Xnyw",
- "/wYTwa4EyYzghyF77lcbWr2wxfD7P++5e9WYQywg7meuwCqmPp3BnqdNgH99ZM8z3/hiz1MvofpnMngQ",
- "nzx6ZKd/hv+5m8Kn7Qc7kfKnFzW8mFsOI6YQhsefDoZzjhGlhn8Ry58/zmdffEosnBudndOcYEs7/dNP",
- "uAkgtywFcglFKSSVLN+Tn3mdhCBIbhijwCsurrmH3FzuVVFQuUehuRBbUHUB8oY4iQQjprha5FIUAQ3j",
- "7ULXCsMhsKzEbG6fZ71HwUjHZARvr+nP5G1VzeDtU/HdwTNx02LeI/Fwk+A8EMBqh59S+7iuLdx5Q2Sn",
- "uhfboNm/GcG/GcEdMgJdST54RIP7C4O6obQRbCSl6QbG+EH/tgwu+FkpYsFRFyPMwqVOGeIVF21eEVQu",
- "OX07LWWZczBY23EGirls7qg3GKG4EetlzZH8mUf3VLDXY/loP77/p7jfX1Duz3Nrx21cIZU5A1lTAeX9",
- "bDb/5gL/33ABm5aL2n2dEw15rsKzrwWefetscW91uHWCTeQD3cq4sZ8XH9qVmVpKgtpUOhPXQV80mVt/",
- "T193qGuVtv5eXFOmk5WQ7p0OZs7ud9ZA84VLytP5tXkH3/uCj/uDH8N4k+ivi7owQfRjVx2NfXXq2EAj",
- "7/72nxvTVGjqQQ5ZG3nevjf8CdPeOubZWC5OFwuMfd8IpRezj/MPHatG+PF9TRI+V+GslGyLqQ/ef/x/",
- "AQAA//+92+QcXcsAAA==",
+ "H4sIAAAAAAAC/+x9a5PcNpLgX0HUboRsXbFbL3vHipjYa0u2t8+yrXC3vbcr6TwoMqsK0yyAA4DdVdbp",
+ "v18gEyBBEqxidbelmYv5JHURj0QikchM5OP9LFebSkmQ1syev59VXPMNWND4F89zVUubicL9VYDJtais",
+ "UHL2PHxjxmohV7P5TLhfK27Xs/lM8g20bVz/+UzD32qhoZg9t7qG+czka9hwN7DdVa51M9I2W6nMD3FG",
+ "Q5y/nH3Y84EXhQZjhlD+JMsdEzIv6wKY1VwanrtPht0Iu2Z2LQzznZmQTElgasnsutOYLQWUhTkJi/xb",
+ "DXoXrdJPPr6kDy2ImVYlDOF8oTYLISFABQ1QzYYwq1gBS2y05pa5GRysoaFVzADX+ZotlT4AKgERwwuy",
+ "3syev5kZkAVo3K0cxDX+d6kBfofMcr0CO3s3Ty1uaUFnVmwSSzv32Ndg6tIahm1xjStxDZK5Xifsh9pY",
+ "tgDGJfv52xfs6dOnX7mFbLi1UHgiG11VO3u8Juo+ez4ruIXweUhrvFwpzWWRNe1//vYFzn/hFzi1FTcG",
+ "0oflzH1h5y/HFhA6JkhISAsr3IcO9bseiUPR/ryApdIwcU+o8b1uSjz/J92VnNt8XSkhbWJfGH5l9DnJ",
+ "w6Lu+3hYA0CnfeUwpd2gbx5lX717/3j++NGHf3lzlv23//OLpx8mLv9FM+4BDCQb5rXWIPNdttLA8bSs",
+ "uRzi42dPD2at6rJga36Nm883yOp9X+b6Euu85mXt6ETkWp2VK2UY92RUwJLXpWVhYlbL0rEpN5qndiYM",
+ "q7S6FgUUc8d9b9YiX7OcGxoC27EbUZaOBmsDxRitpVe35zB9iFHi4LoVPnBBf7/IaNd1ABOwRW6Q5aUy",
+ "kFl14HoKNw6XBYsvlPauMsddVuxyDQwndx/oskXcSUfTZbljFve1YNwwzsLVNGdiyXaqZje4OaW4wv5+",
+ "NQ5rG+aQhpvTuUfd4R1D3wAZCeQtlCqBS0ReOHdDlMmlWNUaDLtZg137O0+DqZQ0wNTir5Bbt+3/6+Kn",
+ "H5nS7Acwhq/gNc+vGMhcFVCcsPMlk8pGpOFpCXHoeo6tw8OVuuT/apSjiY1ZVTy/St/opdiIxKp+4Fux",
+ "qTdM1psFaLel4QqximmwtZZjANGIB0hxw7fDSS91LXPc/3bajiznqE2YquQ7RNiGb//8aO7BMYyXJatA",
+ "FkKumN3KUTnOzX0YvEyrWhYTxBzr9jS6WE0FuVgKKFgzyh5I/DSH4BHyOHha4SsCJwwyCk4zywFwJGwT",
+ "NONOt/vCKr6CiGRO2C+eueFXq65ANoTOFjv8VGm4Fqo2TacRGHHq/RK4VBaySsNSJGjswqPDMRhq4znw",
+ "xstAuZKWCwmFY84ItLJAzGoUpmjC/frO8BZfcANfPhu749uvE3d/qfq7vnfHJ+02NsroSCauTvfVH9i0",
+ "ZNXpP0E/jOc2YpXRz4ONFKtLd9ssRYk30V/d/gU01AaZQAcR4W4yYiW5rTU8fysfur9Yxi4slwXXhftl",
+ "Qz/9UJdWXIiV+6mkn16plcgvxGoEmQ2sSYULu23oHzdemh3bbVKveKXUVV3FC8o7iutix85fjm0yjXks",
+ "YZ412m6seFxugzJybA+7bTZyBMhR3FXcNbyCnQYHLc+X+M92ifTEl/p3909Vla63rZYp1Do69lcymg+8",
+ "WeGsqkqRc4fEn/1n99UxASBFgrctTvFCff4+ArHSqgJtBQ3KqyorVc7LzFhucaR/1bCcPZ/9y2lrfzml",
+ "7uY0mvyV63WBnZzISmJQxqvqiDFeO9HH7GEWjkHjJ2QTxPZQaBKSNtGRknAsuIRrLu1Jq7J0+EFzgN/4",
+ "mVp8k7RD+O6pYKMIZ9RwAYYkYGr4wLAI9QzRyhCtKJCuSrVofvjsrKpaDOL3s6oifKD0CAIFM9gKY83n",
+ "uHzenqR4nvOXJ+y7eGwUxZUsd+5yIFHD3Q1Lf2v5W6yxLfk1tCM+MAy3U+kTtzUBDU7Mvw+KQ7VirUon",
+ "9RykFdf4P3zbmMzc75M6/2OQWIzbceJCRctjjnQc/CVSbj7rUc6QcLy554Sd9fvejmzcKGmCuRWt7N1P",
+ "GncPHhsU3mheEYD+C92lQqKSRo0I1jty04mMLglzdIYjWkOobn3WDp6HJCRICj0Yvi5VfvUf3Kzv4cwv",
+ "wljD44fTsDXwAjRbc7M+maWkjPh4taNNOWKuISr4bBFNddIs8b6Wd2BpBbc8WpqHNy2WEOqxHzI90And",
+ "5Sf8Dy+Z++zOtmP9NOwJu0QGZug4+0eGwmn7pCDQTK4BWiEU25CCz5zWfRSUL9rJ0/s0aY++IZuC3yG/",
+ "CNwhtb33Y/C12qZg+FptB0dAbcHcB324cVCMtLAxE+B76SFTuP8efVxrvhsiGceegmS3QCe6GjwNMr7x",
+ "3SytcfZsofTtuE+PrUjWmpwZd6NGzHfeQxI2ravMk2LCbEUNegO1r3z7mUZ/+BTGOli4sPwPwIJxo94H",
+ "FroD3TcW1KYSJdwD6a+TTH/BDTx9wi7+4+yLx09+e/LFl44kK61Wmm/YYmfBsM+8bsaM3ZXw+XBlqB3V",
+ "pU2P/uWzYKjsjpsax6ha57Dh1XAoMoCSCETNmGs3xFoXzbjqBsAph/MSHCcntDOy7TvQXgrjJKzN4l42",
+ "YwxhRTtLwTwkBRwkpmOX106zi5eod7q+D1UWtFY6YV/DI2ZVrsrsGrQRKvGa8tq3YL5FEG+r/u8ELbvh",
+ "hrm50fRbSxQoEpRlt3I636ehL7eyxc1ezk/rTazOzztlX7rID5ZEwyrQmd1KVsCiXnU0oaVWG8ZZgR3x",
+ "jv4OLIoCl2IDF5Zvqp+Wy/tRFRUOlFDZxAaMm4lRCyfXG8iVJE+IA9qZH3UKevqICSY6Ow6Ax8jFTuZo",
+ "Z7yPYzuuuG6ExEcPs5N5pMU6GEsoVh2yvLu2OoYOmuqBSYDj0PEKP6Oh4yWUln+r9GVrCfxOq7q6dyGv",
+ "P+fU5XC/GG9KKVzfoEMLuSq73jcrB/tJao2fZEEvwvH1a0DokSJfidXaRmrFa63U8v5hTM2SAhQ/kFJW",
+ "uj5D1exHVThmYmtzDyJYO1jL4RzdxnyNL1RtGWdSFYCbX5u0cDbir4EPxfi+bWN5z65Jz1qAo66c1261",
+ "dcXw9XZwX7QdM57TCc0QNWbk7ap5dKRWNB35ApQaeLFjCwDJ1MI/EPmnK1wkx6dnG8QbLxom+EUHrkqr",
+ "HIyBIvOGqYOghXZ0ddg9eELAEeBmFmYUW3J9Z2Cvrg/CeQW7DB0lDPvs+1/N558AXqssLw8gFtuk0Nuo",
+ "+f4VcAj1tOn3EVx/8pjsuAYW7hVmFUqzJVgYQ+FROBndvz5Eg128O1quQeN73B9K8WGSuxFQA+ofTO93",
+ "hbauRtz/vHrrJDy3YZJLFQSr1GAlNzY7xJZdo44O7lYQccIUJ8aBRwSvV9xYekMWskDTF10nOA8JYW6K",
+ "cYBH1RA38q9BAxmOnbt7UJraNOqIqatKaQtFag0Stnvm+hG2zVxqGY3d6DxWsdrAoZHHsBSN75FFKyEE",
+ "cds8tXgni+Hi8EHC3fO7JCo7QLSI2AfIRWgVYTd2gRoBRJgW0UQ4wvQop/G7ms+MVVXluIXNatn0G0PT",
+ "BbU+s7+0bYfExW17bxcKDHpe+fYe8hvCLDm/rblhHg624VdO9kAzCD12D2F2hzEzQuaQ7aN8VPFcq/gI",
+ "HDykdbXSvICsgJLvhoP+Qp8Zfd43AO54q+4qCxl5MaU3vaXk4DSyZ2iF45mU8MjwC8vdEXSqQEsgvveB",
+ "kQvAsVPMydPRg2YonCu5RWE8XDZtdWJEvA2vlXU77ukBQfYcfQrAI3hohr49KrBz1uqe/Sn+C4yfoJEj",
+ "jp9kB2ZsCe34Ry1gxIbqHcSj89Jj7z0OnGSbo2zsAB8ZO7IjBt3XXFuRiwp1ne9hd++qX3+C5DMjK8By",
+ "UULBog+kBlZxf0b+N/0xb6cKTrK9DcEfGN8SyymFQZGnC/wV7FDnfk2OnZGp4z502cSo7n7ikiGgwV3M",
+ "ieBxE9jy3JY7J6jZNezYDWhgpl5shLXksN1Vda2qsniA5LvGnhn9Ix45RYYdmPKqeIFDRcsbbsV8RjrB",
+ "fvgue4pBBx1eF6iUKidYyAbISEIwyd+DVcrtuvC+48F7OFBSB0jPtPEFt7n+H5gOmnEF7L9UzXIuUeWq",
+ "LTQyjdIoKKAA6WZwIlgzp/fsaDEEJWyANEn88vBhf+EPH/o9F4Yt4SYEXLiGfXQ8fIh2nNfK2M7hugd7",
+ "qDtu54nrAx983MXntZA+TznsWeBHnrKTr3uDN69E7kwZ4wnXLf/ODKB3MrdT1h7TyDSvChx30ltONHRq",
+ "3bjvF2JTl9zex6sVXPMyU9egtSjgICf3Ewslv7nm5U9NNwwmgdzRaA5ZjiEQE8eCS9eHoiYO6YatN5nY",
+ "bKAQ3EK5Y5WGHMjL34l8poHxhJH/X77mcoWSvlb1yjug0TjIqWtDNhVdy8EQSWnIbmWG1ukU5/ZOxyHQ",
+ "w8lBwJ0u1jdtk+Zxw5v5fGzPlCs1Ql7f1J983ZrPRlVVh9TrVlUl5HSjVSZw8Y6gFuGnnXjiGwiizgkt",
+ "Q3zF2+JOgdvcP8bW3g6dgnI4ceQS134c84pzenK5uwdphQZiGioNBu+W2L5k6KtaxpFp/vIxO2NhMzTB",
+ "U9ffRo7fz6OKnpKlkJBtlIRdMhhbSPgBPyaPE95vI51R0hjr21ceOvD3wOrOM4Ua74pf3O3+Ce0/NZlv",
+ "lb6vt0wacLJcPuHp8OA7uZ/ytg+cvCwTb4I+bqXPAMy8iZMXmnFjVC5Q2DovzJwOmn9G9EEuXfS/brxx",
+ "7+Hs9cftPX7FIZFo3IWyYpzlpUDTr5LG6jq3byVH41K01ITXUtCix82NL0KTtH0zYX70Q72VHD3WGpNT",
+ "0tNiCQn7yrcAwepo6tUKjO0pKUuAt9K3EpLVUlica+OOS0bnpQKNrkMn1HLDd2zpaMIq9jtoxRa17Yrt",
+ "GJZlrChL/xLnpmFq+VZyy0rgxrIfhLzc4nDhtT4cWQn2RumrBgvp230FEowwWdq76jv6io6vfvlr7wSL",
+ "YfT0md5u3Pht7NYObU9taPj/+ezfn785y/6bZ78/yr76H6fv3j/78PnDwY9PPvz5z/+3+9PTD3/+/N//",
+ "NbVTAfZU0JCH/PylV2nPX6Le0j7eDGD/aIb7jZBZkshiN4webbHPMEDWE9DnXauWXcNbabfSEdI1L0Xh",
+ "eMttyKF/wwzOIp2OHtV0NqJnxQprPVIbuAOXYQkm02ONt5aihg6J6fA8fE30EXd4Xpa1pK0M0jdFnwTH",
+ "MLWcNyGYlJ3lOcP4vDUPXo3+zydffDmbt3F1zffZfOa/vktQsii2qejJArYpJc8fEDwYDwyr+M6ATXMP",
+ "hD3pA0dOGfGwG9gsQJu1qD4+pzBWLNIcLvj0e2PRVp5LcrZ35wffJnf+yUMtPz7cVgMUUNl1KmtDR1DD",
+ "Vu1uAvT8RSqtrkHOmTiBk76xpnD6ovfGK4EvMXsAap9qijbUnAMitEAVEdbjhUyyiKToB0Uez60/zGf+",
+ "8jf3rg75gVNw9edsHiLD31axB999c8lOPcM0DyiQl4aOQi8TqrSPLup4EjluRrlqSMh7K9/Kl7AUUrjv",
+ "z9/Kglt+uuBG5Oa0NqC/5iWXOZysFHseApZecsvfyoGkNZpOKgoVY1W9KEXOrmKFpCVPShEyHOHt2ze8",
+ "XKm3b98NnCqG6oOfKslfaILMCcKqtplPcJBpuOE69WhlmgB3HJkymOyblYRsVZNlMyRQ8OOneR6vKtMP",
+ "dB0uv6pKt/yIDI0P43RbxoxVOsgiTkAhaHB/f1T+YtD8JthVagOG/WXDqzdC2ncse1s/evQUWCfy8y/+",
+ "ync0uatgsnVlNBC3b1TBhZNaCVureVbxVept7O3bNxZ4hbuP8vIGbRxlybBbJ+I0eNTjUO0CAj7GN4Dg",
+ "ODp6Dhd3Qb1CMqv0EvATbiG2ceJG+2J/2/2KYlBvvV29ONbBLtV2nbmznVyVcSQedqbJcbNyQlZwozBi",
+ "hdqqTwe0AJavIb/yeVpgU9ndvNM9eOp4QTOwDmEogw9FkGEOCXxZWACrq4J7UZzLXT+Y34C1wR/4Z7iC",
+ "3aVqU1AcE73fDSY3YwcVKTWSLh2xxsfWj9HffO8Ohop9VYWYbAzOC2TxvKGL0Gf8IJPIew+HOEUUnWDn",
+ "MURwnUAEEf8ICm6xUDfenUg/tTynZSzo5ktk8wm8n/kmrfLkPbfi1aDVnb5vANOBqRvDFtzJ7cpnsqKA",
+ "6YiL1YavYERCjh93JoYldx6EcJBD917yplPL/oU2uG+SIFPjzK05SSngvjhSQWWm568XZqL3Q/8ygQkq",
+ "PcIWJYpJjWMjMR2uO49slHFvDLQ0AYOWrcARwOhiJJZs1tyEJFuYiyyc5UkywB+YAGBf2pfzyNUsSjjW",
+ "JHUJPLd/TgfapU/+EjK+hDQvsWo5IWWLk/DRuz21HUqiAFRACStaODUOhNImI2g3yMHx03JZCgksS3mt",
+ "RWbQ6Jrxc4CTjx8yRhZ4NnmEFBlHYOO7OA7MflTx2ZSrY4CUPpkCD2Pji3r0N6TjvsiP24k8qnIsXIy8",
+ "auWBA3Dv6tjcXz2HWxyGCTlnjs1d89KxOa/xtYMMso+g2NrLNeI9Mz4fE2f3PIDQxXLUmugqus1qYpkp",
+ "AJ0W6PZAvFDbjAI/kxLvYrtw9J50bccw1NTBpDwvDwxbqC16++DVQq7UB2AZhyOAEWn4W2GQXrHf2G1O",
+ "wOybdr80laJCgyTjzXkNuYyJE1OmHpFgxsjlsyh1y60A6Bk72jzIXvk9qKR2xZPhZd7eavM2JVmIGkod",
+ "/7EjlNylEfwNrTBNspXXfYklaafoOq1088xEImSK6B2bGD7SDJ+CDJSASkHWEaKyq9TLqdNtAG+ci9At",
+ "Ml5gNhsud59HnlAaVsJYaI3owU/iU5gnOSbRU2o5vjpb6aVb389KNdcUPSNix84yP/oK0JV4KbSxGb5A",
+ "JJfgGn1rUKn+1jVNy0pdXytKOSuKNG/Aaa9glxWirNP06uf9/qWb9seGJZp6gfxWSHJYWWCK5KQH5p6p",
+ "yUl374Jf0YJf8Xtb77TT4Jq6ibUjl+4c/yDnosd597GDBAGmiGO4a6Mo3cMgo8jZIXeM5Kbojf9kn/V1",
+ "cJiKMPZBr50Qvzt2R9FIybVEBoO9qxD4TOTEEmGjDMPDkNaRM8CrShTbni2URh3VmPlRBo+Ql62HBdxd",
+ "P9gBDER2z1RUjQbTTcHXCviUK7qTAedkEmYuu4nyYoYQTyVMqHQwRFQTdXcIV5fAy+9h96tri8uZfZjP",
+ "7mY6TeHaj3gA16+b7U3iGZ/myZTWeQk5EuW8qrS65mXmDcxjpKnVtSdNbB7s0R+Z1aXNmJffnL167cH/",
+ "MJ/lJXCdNaLC6KqwXfUPsyrK9jdyQEImdafzBZmdRMlo85sUZbFR+mYNPiV1JI0Ocme2Dw7RUfRG6mXa",
+ "Q+igydm/jdAS97yRQNU8kbTmO3oh6b6K8GsuymA3C9COePPg4qYlYE1yhXiAO7+uRI9k2b2ym8HpTp+O",
+ "lroO8KR4rj1JszeUF94wJftP6OjzvKv8q/uGY+ZLsooMmZOsN2hJyEwp8rSNVS6MIw5Jb2euMcPGI8Ko",
+ "G7EWI0+xshbRWK7ZlNw2PSCjOZLINMn0Oi3uFsrX/Kml+FsNTBQgrfuk8VT2DiqmSfHW9uF16mSH4Vx+",
+ "YLLQt8PfRcaIs772bzwEYr+AEb/UDcB92ajMYaGNRcr9ED1JHPHgH884uBL3PNZ7+vDUTM6L6+6LW1yi",
+ "Z8j/HGFQrvbD9YGC8urTz47Mkaz3I0y21Op3SOt5qB4nApZCnluBXi6/QxzoEFe56LCYxrrTli1qZx/d",
+ "7jHpJrZCdZ0URqgedz56lsOEm8FCzSVtNQWSdHzd0gQTe5We0vgtwXiYB564Jb9Z8FQ2UidkOJjO2gfg",
+ "ji3dKhY6B9ybJtqCZmfRW3LTVlAwegW6jSUcJra5pcBA004WFVrJAKk2lgnm9P5XGpUYppY3XFIVF9eP",
+ "jpLvbYCMX67XjdKYSsKkzf4F5GLDy7TkUORDE28hVoIKlNQGogoYfiAq/kRU5KuINDFEHjXnS/ZoHpXh",
+ "8btRiGthxKIEbPGYWiy4QU7eGKKaLm55IO3aYPMnE5qva1loKOzaEGKNYo1Qh+pN83i1AHsDINkjbPf4",
+ "K/YZPtsZcQ2fOyz6+3n2/PFXaHSlPx6lLgBfYGYfNymQnfynZydpOsZ3SxrDMW4/6kky6p4qzI0zrj2n",
+ "ibpOOUvY0vO6w2dpwyVfQdpTZHMAJuqLu4mGtB5eZEHlkYzVaseETc8Pljv+NOJ97tgfgcFytdkIu/GP",
+ "O0ZtHD215S1o0jAc1VrymYkDXOEjvpFW4Ymop0R+XKMp3W+pVeNL9o98A120zhmn/CGlaL0XQr50dh7S",
+ "E2Gq5iZDM+HGzeWWjmIOOjMsWaWFtKhY1HaZ/Ynla6557tjfyRi42eLLZ4n01N00qfI4wD863jUY0Ndp",
+ "1OsRsg8yhO/LPpNKZhvHUYrP22iP6FSOPuamn+3G3g73Dz1VKHOjZKPkVnfIjUec+k6EJ/cMeEdSbNZz",
+ "FD0evbKPTpm1TpMHr90O/fLzKy9lbJRO5Rxsj7uXODRYLeAafffSm+TGvONe6HLSLtwF+k/78hBEzkgs",
+ "C2c5qQhcb34NZtlRn30nwv/6gy+nOJC9R/wMyJGg6fORYxGSLkkkoaEbH8NVs788/gvTsPQFEh8+RKAf",
+ "Ppx7Ye4vT7qfiUk9fJjOxJO0abhfWywcxQr7mQpc39Qefq0SFoaQ9r55DfHxBgkLzxirdR/cUV74oeas",
+ "m2L849+F9+PJln6tTJ+Ct2/f4JeAB/yjj4hPfORxA1t/DFrJCKFEJRaSJFM03yM/Cc6+VtuphNPjpIF4",
+ "/g5QlERJLcri1zZ6t8faNJf5OvnuuXAdf2tr7TWLo8ObTAG55lJCmRyOdIbfgm6R0H7+qqbOsxFyYtt+",
+ "UQ1abm9xLeBdMANQYUKHXmFLN0GM1W5gZON4X65UwXCeNt9ge1yHxViilPl/q8HY1IWFH8j5D+3bjh1Q",
+ "xnYGskCrwgn7jsppr4F1kkmhNh+yfXQj3+uqVLyYYxaSy2/OXjGalfpQxSjKGL9CZba7ip5dM0qlOs2N",
+ "PBR/Soe4TB9nv8+9W7WxWZPgPRVE7Fq0KehF760H1dwYOyfsZVQYl+KN3RAMk9DojdPMm9FIxkWacP+x",
+ "ludrVN07rHWc5KeXOghUaaLyok2ZsCa/KJ47B7evdkDFDuZM2TXoG2GoijJcQzduuQni96ajEMfcXZ6u",
+ "pSRKOTnilmuyiR6L9gAcXZHhOSgJWQ/xRypuVCnk2MoPF9grme6sX0ZiUFeUomCb8k+hOn7OpZIix2Rj",
+ "qSval1ue8lY6IS9b3xgfjrg/oYnDlSxe0bhTeiyOlrMIjNAjbvhYE311m0rUQX9arOu75patwBrP2aCY",
+ "hxos3l4spAGfLxaLc0d8UunO+zNyyKRLQ9Y8fR1JRhg+NWIA+NZ9+9GbhzCu4EpIVAQ92rzgRxZdrAZr",
+ "nfYoLFspMH493Rhy88b1OcFw6gK2705C9Vgcg55v3bLJV2E41FnwXPCeAq7tC9fWJ7lqfu54qtOkZ1Xl",
+ "Jx2v0JOUB+xWjiI48QKdhSfACLnN+PFoe8htr8sR3qeO0OAaHRagwnt4QBhNtZpeJTQntBJFYQtGrn7J",
+ "TBdCJsB4JSS0tY0TF0SevBJwY/C8jvQzueaWRMBJPO0SeEkKdYKhGeufqO46VD/Fl0MJrjHMMb6NbaGd",
+ "EcbRNGgFNy53TUllR92RMPECa7l7RA7L5qBU5YWoAiNPeoV0UozDMe5Qqqt7AYzo+R2ZiLpjvrtjb6Kx",
+ "YOJFXazAZrwoUul7v8avDL+yokbJAbaQ102a16piOebO6SYTGlKbnyhX0tSbPXOFBnecLqpMlaCGuDpW",
+ "2GEMVlrs8N9UjtPxnfHOOke7iwbPnOK4DFpD99eU1OtoOjNilU3HBN4pd0dHO/XtCL3tf6+UXqpVF5BP",
+ "YbYb4XLxHqX42zfu4ogzbAwS99LV0iTAQOdMFeqJotrYhG53uRJeZYNMvvgo2NQr3G+AGK88OMfLb8RF",
+ "OzbC0v1KhskxR+18NK6AWx/haDnby4JGo8bIy6tn1h1a2Mc8u8ix6/7MoX6texEaXAaHAH0f/JFZxYV3",
+ "oWiZxRCzPnJhGEsyxae53eD+Inw8wKjF7vvrMd/9kFAPv/crk12BT3tQabgWqg7OCcF7LaiE9GunzlcT",
+ "PZFc/9DwilN9WnPoqPH20leIoGV6nfz7X8nXkYG0evd3YModbPqg5tlQ2iXzVNuENcnFJyUb79yKU5JN",
+ "pvIaetmwU3XtQM24AVm9nCIODGvAzWfnxVEXZio35oxGSR27dEW38dRhbbowPGKVMqLN8Z8q9TbRTfQS",
+ "q7VFqc+GYwUfrWvILRZ2aH1PNMAxidDcZFHx2H+mEBtRpxtvWp85bF+6sGE1hwN3/CCiL4pKpUz4J9OT",
+ "Y501HobIpzGj9Qqkr9/ajdWZHDGwXEJuxfWBCMr/XIOMovPmwS5DddijgErReKBjAp7jrY4tQPsCHPfC",
+ "EyXCvDM4Y/FTV7B7YFiHGpKp+efhqr1N7hXEAHKHzJGIMikPHjIke6cKYRrKQCwEjznqDm0Wu9GqXlE8",
+ "8C3nCiTpLo42RnjPlOmyQpPmcl2PipxHZ+qxIMthVZJx/eMlFoExTcXNkLsl1tLZ+TDD5Y3P/YLxrs3b",
+ "ScgCAyb8FoLbaZZSXEFcdwxfqm64LkKLpOklWHWyPffRIDIyVNToA71sZhatf/MwFi6RMw292PNSOTEi",
+ "GwsF6LoUN/44Dww5TlEKf3SWdnAtQfv6jCj/lspAZlXwh94Hxz5UkHfYrZBgRvOUEnCj2YN+btMjYb5m",
+ "jtmCuHcKixfINGy4g05HSYzG59yH7Bf0PQR/hXy9By1MDb0eLhwRPNuFGSAxpvol87fl4aCy2xibhJRU",
+ "A9ykMhpJ0N3XkEqros7pgo4PRmOQm5wvbA8rSdpp8uEqezpCFJl7BbtTUoJCxY2wgzHQJDkR6FEmjN4m",
+ "36v5zaTgXt0LeJ/ScjWfVUqV2chjx/kwDVOf4q9EfgUFczdF8AAdqYLEPkMbe/OafbPehbRDVQUSis9P",
+ "GDuT5HMfHra7ecB7k8sHdt/8W5y1qCkzmjeqnbyVaedlzFmm78jNwjD7eZgBx+ruOBUNciDJz3YkBZTm",
+ "N4maYCdTtfLhU3O/TlNLVARFSia5oBerF3jQU4ajGy0seMcGusTdRjL/0sVMqVJOgnAzLX6/cSh1O1Kq",
+ "kYs7ngwBsiCnxHk2UPjBkwhoajAdcBRqfITa8jWtn9BQPCpLdZPhMcqaJHYppcu1694SIW1v282R2wIi",
+ "hyNuvASxY2tesFxpDXncIx2nQ0BtlIasVOh/lHoaXVonEG7QOV+yUq2YqpyeT7kgwyNSsrZSNNd91ZGi",
+ "mHOCIKMXr5GsHmB8jLkHlxoP4d1Tyun4MlGX64ThCjcs7NbRtaA8wR1dwiUCcwKhHzbanaVKXXXX1S+6",
+ "NlYC0aqNyNPo/sdy1xl1sklRbwoVPosyRXFiMzzgMU9pXmfx9AzRDJIvyiSv9sfPv1Ihnbv/4hXeH5ct",
+ "wTOXEX6WqNlMbDjLRy+LHgAIKYUW2VpT6uWYlTcF3dSKQhHxja0P6ESGg64Md4PNjXCfQH3YTyipim+J",
+ "g9Dsji9IF2KpRw5V0kliv08CVQFdTPVMaDLNT+SfEQDjvgodGCZ5LBwLxhKr6mY8geTzRk+cd4qei94l",
+ "EbKAEjPMOdmJ1sDc2LUGH9tL5T979cYqbtdBbnTNh9YcWcAWDAbeUtEkbsj2GGygvvZoXyBXVVbCNXRc",
+ "OHzAcZ3nYIy4hrhuKXVmBUCFLwJ9PTXlmxBfhz3lxa89i163p2A3qc0QYmmn2AFVJalYbWVGx8RMPUoO",
+ "omtR1LyDP3OHCo5jxRsT93WA9d00TnE0k0gvbh+LOOhNhDSfPJcy7UwUx7s3ZkicrWieK4gI25NtKn4j",
+ "x9X2IVG24ub02qcRYr/ZQo5Xd9db5u44YTgYM71cFqNypm52+Lbmn1Eq20dkg0qwaT0MQiXvOO1U0BV8",
+ "38TVSIZqYRIDCNPyBvS9hda3M2q24TtWiOUSND3FGctlwXURNxeS5aAtF5Ld8J25vU7moNU1zA+qZY5T",
+ "46CBWaUUNLQqEyDlziv8YyrTBFUH310Tag5d21aNFakd7Eo6GIhvnWqIXpEjROBTUaBiSIdVSZTK2YZf",
+ "wZHzGPE77J8GE0R5y71VOOuUKT7spfWfEHV44H+Rwu6ldpL3+m6q9I5IxBhoUK5aZwbanCENpjyLL6lU",
+ "Wuxd3K88EvaajJo0H4xkUu2K6SO7iGYd75Yey+RmurrasRyl/JeJh2fI280edwUwUa223Jubh2LJ4FIg",
+ "pMy99/eRUgupC7woxFhp/DX4dOX+bHWnbUyAbpzplu7I3pWGqFJVlk95wyqgBMdqSGvxkHZhnGAjq/ID",
+ "10LykhzhSl0VSS2RP+CxINEAvX2aC3He90PrCgHNwcO6y3mtUYy94bvDKTFbQSDtwk8jBx08eCY1UPsN",
+ "piNuqJRPMuPkMQJiguukqtkMc/3d/2IoNqV9Pf/jluPfx9ILOJNeUcIahfvorVWlAqkkaI3LXYpphBeg",
+ "WyxwTD6c4F19b1vVnJY/YoOSl+TtUkBPAm3oaZvAZlSzfb/zU5whvk1boMlhG50lgkba5xc/tJrqtOrx",
+ "ocMB8GKfuKh+fHie9OB84vj/HxqkREt5N0YJneUfcrPzC2xV+2iLvLRsLVC9DooZ7e5L5ENpXjSuiSNX",
+ "88CDEdPBO/GsLBOejyTAU3HxiHDcvaivefnxvRexTsAZ4gOKn8f9HWL3txjJhEpzu+DbV3zS3JGr2/1N",
+ "LV+jt+V/gtuj5LXgh/I2gwHzR/WLl/Q0tQyVhq9Bshsckyy2j79kC59gqtKQC9O3RdyEIoCNtxfWxPUB",
+ "z1t7wL3s0Dp/VfYOZLwMpj32Y1tQDF9fVrKFsD2in5ipjJzcJJWnqG9AFgn8pXhUnOn5wHVx1YnhaKW6",
+ "6EZTGu45liOKyjwylmOYw3rq8ihewV06tYHhOiff1h3cJi7qdm1TA5EmZ4PCak9T4ofSmZtcdwxgupcU",
+ "TkclcPoDQpcIR34MP2+KYn4dS2ZBCRtG8qb09qMWZXGIMDpZcD40NfIxz8tvPl/ax71LAwTkTj08qr5k",
+ "9R1iQAgxibV2Jo+mivLbTEht47slEtmgq1Jea2F3mMY9aLzit2SQ1XeNw74P+GiMqP7us+oKmkIArXt/",
+ "bcLt+p3iJd5HZNuV7hZS5Qn7Zss3VeltIuzPDxb/Bk//9Kx49PTxvy3+9OiLRzk8++KrR4/4V8/446+e",
+ "PoYnf/ri2SN4vPzyq8WT4smzJ4tnT559+cVX+dNnjxfPvvzq3x44PuRAJkBnIWno7H9nZ+VKZWevz7NL",
+ "B2yLE16J72FH5csdGYfC6DzHkwgbLsrZ8/DT/wwn7CRXm3b48OvM5yScra2tzPPT05ubm5O4y+kK/Xkz",
+ "q+p8fRrmGVROP3t93ryb07ML7mjjMUW+OJ4UzvDbz99cXLKz1+cnLcHMns8enTw6eezGVxVIXonZ89lT",
+ "/AlPzxr3/dQT2+z5+w/z2ekaeInhL+6PDVgt8vBJAy92/v/mhq9WoE98tXj30/WT0yBWnL73fs0f9n07",
+ "jQsvnr7vuH8XB3piYbbT9yHf+P7WnYTe3u096jARin3NTheYAm9qUzBR4/GloLJhTt+juDz6+6nP2ZX+",
+ "iGoLnYfTECORbtnB0nu7dbD2euTc5uu6On2P/0H6jMCiCPlTu5Wn+EBw+r6zGv95sJru7233uMX1RhUQ",
+ "AFbLJdVP2Pf59D39G00E2wq0cIIfRqX4Xyl68BSzmu6GP++kN6+XkIr5+EUaIMU0ZOzaybyNYW2O7HkR",
+ "Gl/sZB4k1BAJjgfxyaNHNP0z/M/M50vsRUac+hM3sSRRNyYd2VzvnbeBF1NgY1AAwvD448FwLjFoyvEv",
+ "Rvz5w3z2xcfEwrnT2SUvGbak6Z9+xE0AfS1yYJewqZTmWpQ79ots8mxFOdhTFHgl1Y0MkLvLvd5suN6h",
+ "0LxR12CYT+8eESfT4MQUes7DJ6eWhvF24SuDxnysfjebUwaCdygY2ZSMEOw1w5mCraodvHsqvjt4Jqbv",
+ "Qlf03BPyMQnOAw8dNPxQbh7ub9j7/vMETfUgtUGzfzKCfzKCe2QEttZy9IhG9xfGLULlPTxznq9hHz8Y",
+ "3pbRBT+rVMr9/WIPs/DZAcd4xUWXV0QFFp+/mZaV1z8wkO24ACN80SnUG5xQ3Ir1uuFI4cyjD0e01/vK",
+ "Znx493dxv7/gMpznzo5T6AzXpQDdUAGXw4SN/+QC/99wAco8y2lf58xCWZr47FuFZ58eW3w4uqRHsIl8",
+ "oOqVw079fPq+W0C2oySYdW0LdRP1RZM5vfcMdQdf+rv39+kNFzZbKu1D0bHAz7CzBV6e+ryTvV/bVE+D",
+ "L5i/KvoxdvBM/nra1E9Lfuyro6mvXh0baRR8xMLn1jQVm3qQQzZGnjfvHH/C6hyeebaWi+enpxjeuVbG",
+ "ns4+zN/3rBrxx3cNSYR03LNKi2vM7vXuw/8LAAD//9Mad+9M2AAA",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/experimental/routes.go b/daemon/algod/api/server/v2/generated/experimental/routes.go
index e253331bf..47db46103 100644
--- a/daemon/algod/api/server/v2/generated/experimental/routes.go
+++ b/daemon/algod/api/server/v2/generated/experimental/routes.go
@@ -75,175 +75,185 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9/ZPbNrLgv4LSe1X+OFEafyRvPVWpdxM7yc7FcVyeSfbe8/gSiGxJ2CEBLgBqpPj8",
- "v1+hAZAgCUrUzMTerbqf7BHx0Wg0Gv2Nj5NUFKXgwLWanH6clFTSAjRI/Iumqai4Tlhm/spApZKVmgk+",
- "OfXfiNKS8dVkOmHm15Lq9WQ64bSApo3pP51I+EfFJGSTUy0rmE5UuoaCmoH1rjSt65G2yUokbogzO8T5",
- "q8mnPR9olklQqg/lzzzfEcbTvMqAaEm5oqn5pMgN02ui10wR15kwTgQHIpZEr1uNyZJBnqmZX+Q/KpC7",
- "YJVu8uElfWpATKTIoQ/nS1EsGAcPFdRA1RtCtCAZLLHRmmpiZjCw+oZaEAVUpmuyFPIAqBaIEF7gVTE5",
- "fT9RwDOQuFspsA3+dykB/oBEU7kCPfkwjS1uqUEmmhWRpZ077EtQVa4Vwba4xhXbACem14z8VClNFkAo",
- "J+++f0mePXv2wiykoFpD5ohscFXN7OGabPfJ6SSjGvznPq3RfCUk5VlSt3/3/Uuc/8ItcGwrqhTED8uZ",
- "+ULOXw0twHeMkBDjGla4Dy3qNz0ih6L5eQFLIWHkntjG97op4fxfdFdSqtN1KRjXkX0h+JXYz1EeFnTf",
- "x8NqAFrtS4MpaQZ9f5K8+PDxyfTJyad/e3+W/Lf786tnn0Yu/2U97gEMRBumlZTA012ykkDxtKwp7+Pj",
- "naMHtRZVnpE13eDm0wJZvetLTF/LOjc0rwydsFSKs3wlFKGOjDJY0irXxE9MKp4bNmVGc9ROmCKlFBuW",
- "QTY13PdmzdI1SamyQ2A7csPy3NBgpSAborX46vYcpk8hSgxct8IHLuifFxnNug5gArbIDZI0FwoSLQ5c",
- "T/7GoTwj4YXS3FXquMuKXK6B4OTmg71sEXfc0HSe74jGfc0IVYQSfzVNCVuSnajIDW5Ozq6xv1uNwVpB",
- "DNJwc1r3qDm8Q+jrISOCvIUQOVCOyPPnro8yvmSrSoIiN2vQa3fnSVCl4AqIWPwdUm22/X9d/PyGCEl+",
- "AqXoCt7S9JoAT0UG2YycLwkXOiANR0uIQ9NzaB0Ortgl/3clDE0UalXS9Dp+o+esYJFV/US3rKgKwqti",
- "AdJsqb9CtCASdCX5EEB2xAOkWNBtf9JLWfEU97+ZtiXLGWpjqszpDhFW0O03J1MHjiI0z0kJPGN8RfSW",
- "D8pxZu7D4CVSVDwbIeZos6fBxapKSNmSQUbqUfZA4qY5BA/jx8HTCF8BOH6QQXDqWQ6Aw2EboRlzus0X",
- "UtIVBCQzI7845oZftbgGXhM6WezwUylhw0Sl6k4DMOLU+yVwLjQkpYQli9DYhUOHYTC2jePAhZOBUsE1",
- "ZRwyw5wRaKHBMqtBmIIJ9+s7/Vt8QRV8/Xzojm++jtz9peju+t4dH7Xb2CixRzJydZqv7sDGJatW/xH6",
- "YTi3YqvE/tzbSLa6NLfNkuV4E/3d7J9HQ6WQCbQQ4e8mxVac6krC6RV/bP4iCbnQlGdUZuaXwv70U5Vr",
- "dsFW5qfc/vRarFh6wVYDyKxhjSpc2K2w/5jx4uxYb6N6xWshrqsyXFDaUlwXO3L+amiT7ZjHEuZZre2G",
- "isfl1isjx/bQ23ojB4AcxF1JTcNr2Ekw0NJ0if9sl0hPdCn/MP+UZW5663IZQ62hY3clo/nAmRXOyjJn",
- "KTVIfOc+m6+GCYBVJGjTYo4X6unHAMRSihKkZnZQWpZJLlKaJ0pTjSP9u4Tl5HTyb/PG/jK33dU8mPy1",
- "6XWBnYzIasWghJblEWO8NaKP2sMsDIPGT8gmLNtDoYlxu4mGlJhhwTlsKNezRmVp8YP6AL93MzX4ttKO",
- "xXdHBRtEOLENF6CsBGwbPlAkQD1BtBJEKwqkq1ws6h8enpVlg0H8flaWFh8oPQJDwQy2TGn1CJdPm5MU",
- "znP+akZ+CMdGUVzwfGcuBytqmLth6W4td4vVtiW3hmbEB4rgdgo5M1vj0WDE/PugOFQr1iI3Us9BWjGN",
- "/+rahmRmfh/V+V+DxELcDhMXKloOc1bHwV8C5eZhh3L6hOPMPTNy1u17O7Ixo8QJ5la0snc/7bh78Fij",
- "8EbS0gLovti7lHFU0mwjC+sduelIRheFOTjDAa0hVLc+awfPQxQSJIUODN/mIr3+K1XrezjzCz9W//jh",
- "NGQNNANJ1lStZ5OYlBEer2a0MUfMNEQFnyyCqWb1Eu9reQeWllFNg6U5eONiiUU99kOmBzKiu/yM/6E5",
- "MZ/N2Tas3w47I5fIwJQ9zs7JkBlt3yoIdibTAK0QghRWwSdG6z4KypfN5PF9GrVH31mbgtshtwjcIbG9",
- "92PwrdjGYPhWbHtHQGxB3Qd9mHFQjNRQqBHwvXKQCdx/hz4qJd31kYxjj0GyWaARXRWeBh7e+GaWxjh7",
- "thDydtynw1Y4aUzOhJpRA+Y77SAJm1Zl4kgxYrayDToDNV6+/UyjO3wMYy0sXGj6J2BBmVHvAwvtge4b",
- "C6IoWQ73QPrrKNNfUAXPnpKLv5599eTpb0+/+tqQZCnFStKCLHYaFHnodDOi9C6HR/2VoXZU5To++tfP",
- "vaGyPW5sHCUqmUJBy/5Q1gBqRSDbjJh2fay10YyrrgEcczgvwXByi3ZibfsGtFdMGQmrWNzLZgwhLGtm",
- "yYiDJIODxHTs8pppduES5U5W96HKgpRCRuxreMS0SEWebEAqJiLelLeuBXEtvHhbdn+30JIbqoiZG02/",
- "FUeBIkJZesvH83079OWWN7jZy/nteiOrc/OO2Zc28r0lUZESZKK3nGSwqFYtTWgpRUEoybAj3tE/gEZR",
- "4JIVcKFpUf68XN6PqihwoIjKxgpQZiZiWxi5XkEquI2EOKCduVHHoKeLGG+i08MAOIxc7HiKdsb7OLbD",
- "imvBODo91I6ngRZrYMwhW7XI8u7a6hA67FQPVAQcg47X+BkNHa8g1/R7IS8bS+APUlTlvQt53TnHLoe6",
- "xThTSmb6eh2a8VXejr5ZGdhnsTV+kQW99MfXrQGhR4p8zVZrHagVb6UQy/uHMTZLDFD8YJWy3PTpq2Zv",
- "RGaYia7UPYhgzWANhzN0G/I1uhCVJpRwkQFufqXiwtlAvAY6itG/rUN5T6+tnrUAQ10prcxqq5Kg97Z3",
- "XzQdE5raE5ogatSA76p2OtpWdjobC5BLoNmOLAA4EQvnIHKuK1wkRdez9uKNEw0j/KIFVylFCkpBljjD",
- "1EHQfDt7deg9eELAEeB6FqIEWVJ5Z2CvNwfhvIZdgoESijz88Vf16AvAq4Wm+QHEYpsYems133kB+1CP",
- "m34fwXUnD8mOSiD+XiFaoDSbg4YhFB6Fk8H960LU28W7o2UDEv1xfyrF+0nuRkA1qH8yvd8V2qocCP9z",
- "6q2R8MyGccqFF6xig+VU6eQQWzaNWjq4WUHACWOcGAceELxeU6WtD5nxDE1f9jrBeawQZqYYBnhQDTEj",
- "/+o1kP7YqbkHuapUrY6oqiyF1JDF1sBhu2euN7Ct5xLLYOxa59GCVAoOjTyEpWB8hyy7EosgqmtXiwuy",
- "6C8OHRLmnt9FUdkCokHEPkAufKsAu2EI1AAgTDWItoTDVIdy6rir6URpUZaGW+ik4nW/ITRd2NZn+pem",
- "bZ+4qG7u7UyAwsgr195BfmMxa4Pf1lQRBwcp6LWRPdAMYp3dfZjNYUwU4ykk+ygfVTzTKjwCBw9pVa4k",
- "zSDJIKe7/qC/2M/Eft43AO54o+4KDYmNYopvekPJPmhkz9ACx1Mx4ZHgF5KaI2hUgYZAXO8DI2eAY8eY",
- "k6OjB/VQOFd0i/x4uGy71ZER8TbcCG123NEDguw4+hiAB/BQD317VGDnpNE9u1P8Fyg3QS1HHD/JDtTQ",
- "Eprxj1rAgA3VBYgH56XD3jscOMo2B9nYAT4ydGQHDLpvqdQsZSXqOj/C7t5Vv+4EUTcjyUBTlkNGgg9W",
- "DSzD/sTG33THvJ0qOMr21ge/Z3yLLCdnCkWeNvDXsEOd+60N7AxMHfehy0ZGNfcT5QQB9eFiRgQPm8CW",
- "pjrfGUFNr2FHbkACUdWiYFrbgO22qqtFmYQDRP0ae2Z0TjwbFOl3YIxX8QKHCpbX34rpxOoE++G77CgG",
- "LXQ4XaAUIh9hIeshIwrBqHgPUgqz68zFjvvoYU9JLSAd00YPbn39P1AtNOMKyH+JiqSUo8pVaahlGiFR",
- "UEAB0sxgRLB6ThfZ0WAIcijAapL45fHj7sIfP3Z7zhRZwo1PuDANu+h4/BjtOG+F0q3DdQ/2UHPcziPX",
- "Bzp8zMXntJAuTzkcWeBGHrOTbzuD114ic6aUcoRrln9nBtA5mdsxaw9pZFxUBY47ypcTDB1bN+77BSuq",
- "nOr78FrBhuaJ2ICULIODnNxNzAT/bkPzn+tuB3S6JgqMFQVkjGrId6SUkIKNzjeimqrHnhEbt5euKV+h",
- "hC5FtXKBY3Yc5LCVsrYQWfHeEFEpRm95glblGMd1wcI+QcPIL0CNDtU1SVuN4YbW87mcnDFXod+5iIk+",
- "6pWaTgZVTIPUTaNiWuS0s0xGcN+WgBXgp5l4pO8CUWeEjT6+wm0x1Gs298+xkTdDx6DsTxyEsjUfh6LZ",
- "jH6b7+5ByrADEQmlBIV3QmgXUvarWIYZZe7SUDuloeibzm3X3waO37tBBU3wnHFICsFhF02iZhx+wo/R",
- "44T30kBnlBCG+naF/hb8HbDa84yhxrviF3e7e0K7LiL1vZD35YO0A46Wp0e4/A76t92Ut3VM0jyP+PJc",
- "vkmXAahpnd/OJKFKiZShkHSeqak9aM7955JT2uh/W0fR3sPZ647bcVqFqYxolIW8JJSkOUOTreBKyyrV",
- "V5yiUShYaiTayGu/w2bCl75J3C4ZMRu6oa44xUiz2lQUjZBYQsQu8j2AtxaqarUCpTvKxRLgirtWjJOK",
- "M41zFea4JPa8lCAx5GdmWxZ0R5aGJrQgf4AUZFHptriN6VRKszx3HjQzDRHLK041yYEqTX5i/HKLw3kv",
- "uz+yHPSNkNc1FuK3+wo4KKaSeFTUD/YrBqy65a9d8Cqmv9vP1udixm9yrnZoM2pSuv/Pw/88fX+W/DdN",
- "/jhJXvyP+YePzz89etz78emnb775v+2fnn365tF//ntspzzssWQfB/n5K6eKnr9CfaNxuvRg/2wG94Lx",
- "JEpkYfhEh7bIQ0xsdQT0qG2N0mu44nrLDSFtaM4yw1tuQw7dG6Z3Fu3p6FBNayM61ie/1iOl+DtwGRJh",
- "Mh3WeGspqh9IGE+rQy+gy5TD87KsuN1KL33brBEf0CWW0zp10lZVOSWYV7emPhrR/fn0q68n0yYfrv4+",
- "mU7c1w8RSmbZNpb1mME2ppy5A4IH44EiJd0p0HHugbBHY9dsMEU4bAFGq1drVn5+TqE0W8Q5nI/Fd0ae",
- "LT/nNkjenB/0Ke6cq0IsPz/cWgJkUOp1rNpCS1DDVs1uAnTiPEopNsCnhM1g1jWyZEZfdFF0OdAlZv2j",
- "9inGaEP1ObCE5qkiwHq4kFGWjBj9oMjjuPWn6cRd/ure1SE3cAyu7py1A9H/rQV58MN3l2TuGKZ6YBNw",
- "7dBBymRElXZZQa0IIMPNbI0ZK+Rd8Sv+CpaMM/P99IpnVNP5giqWqnmlQH5Lc8pTmK0EOfWJRq+ople8",
- "J2kNloEKUrxIWS1ylpLrUCFpyNOW9uiPcHX1nuYrcXX1oRcM0Vcf3FRR/mInSIwgLCqduMIEiYQbKmPO",
- "JlUnpuPItvLIvlmtkC0qa5H0hQ/c+HGeR8tSdRNU+8svy9wsPyBD5dIvzZYRpYX0sogRUCw0uL9vhLsY",
- "JL3xdpVKgSK/F7R8z7j+QJKr6uTkGZBWxubv7so3NLkrYbR1ZTCBtmtUwYVbtRK2WtKkpKuYT+vq6r0G",
- "WuLuo7xcoI0jzwl2a2WK+kh4HKpZgMfH8AZYOI7OesPFXdhevghVfAn4CbcQ2xhxo/G033a/gtzRW29X",
- "J/+0t0uVXifmbEdXpQyJ+52pa9OsjJDlwx8UW6G26sr4LICka0ivXX0VKEq9m7a6+wgbJ2h61sGUrbxj",
- "M7+w9gN6BBZAqjKjThSnfNdNwlegtY/jfQfXsLsUTemIY7Lu20ngauigIqUG0qUh1vDYujG6m+/CuFCx",
- "L0ufS41JdZ4sTmu68H2GD7IVee/hEMeIopWkPIQIKiOIsMQ/gIJbLNSMdyfSjy3PaBkLe/NFqvB43k9c",
- "k0Z5chFX4WrQ6m6/F4BlvMSNIgtq5HbhKlDZROeAi1WKrmBAQg6dMiPTiVuOHBzk0L0XvenEsnuh9e6b",
- "KMi2cWLWHKUUMF8MqaAy04mz8zNZv5/zTGBhSYewRY5iUh2QaJkOlS3nmK2UNwRanIBB8kbg8GC0MRJK",
- "NmuqfHEsrCHmz/IoGeBPTNzfV67lPAgRCwqF1cVYPM/tntOedumKtvhKLb48S6hajii1YiR8jEqPbYfg",
- "KABlkMPKLtw29oTSFBFoNsjA8fNymTMOJIlFmwVm0OCacXOAkY8fE2It8GT0CDEyDsBGfzYOTN6I8Gzy",
- "1TFAclcEgfqx0RMe/A3xfC0bf21EHlEaFs4GvFqp5wDUhSjW91cnUBaHIYxPiWFzG5obNuc0vmaQXtUQ",
- "FFs7NUJcRMWjIXF2jwPEXixHrcleRbdZTSgzeaDjAt0eiBdim9iEzajEu9guDL1HQ9IxfTR2MG19lgeK",
- "LMQWo3TwarEh0AdgGYbDgxFo+FumkF6x39BtboHZN+1+aSpGhQpJxpnzanIZEifGTD0gwQyRy8Og5Mqt",
- "AOgYO5r6xU75PaiktsWT/mXe3GrTppSYz/aJHf+hIxTdpQH89a0wdZGUt12JJWqnaAebtOvDBCJkjOgN",
- "m+g7afquIAU5oFKQtISo5DrmOTW6DeCNc+G7BcYLrEJD+e5REMEkYcWUhsaI7uMkvoR5kmLxOyGWw6vT",
- "pVya9b0Tor6mrBsRO7aW+dlXgCHASyaVTtADEV2CafS9QqX6e9M0Liu1Y6RsqViWxXkDTnsNuyRjeRWn",
- "Vzfvj6/MtG9qlqiqBfJbxm3AygJLG0cjJ/dMbYNr9y74tV3wa3pv6x13GkxTM7E05NKe41/kXHQ47z52",
- "ECHAGHH0d20QpXsYZJDx2ueOgdwU+Phn+6yvvcOU+bEPRu34vNuhO8qOFF1LYDDYuwqGbiIjljAdVAbu",
- "p6IOnAFalizbdmyhdtRBjZkeZfDw9dQ6WMDddYMdwEBg94xlw0hQ7dJ5jYBvazy3KtfMRmHmsl3gLmQI",
- "4VRM+RcK+oiqs+UO4eoSaP4j7H41bXE5k0/Tyd1MpzFcuxEP4Pptvb1RPKNr3prSWp6QI1FOy1KKDc0T",
- "Z2AeIk0pNo40sbm3R39mVhc3Y15+d/b6rQP/03SS5kBlUosKg6vCduW/zKpslb6BA+IroBudz8vsVpQM",
- "Nr8uLRYapW/W4EpJB9Jor+Zl43AIjqIzUi/jEUIHTc7ON2KXuMdHAmXtImnMd9ZD0vaK0A1lubebeWgH",
- "onlwceMKp0a5QjjAnb0rgZMsuVd20zvd8dPRUNcBnhTOtafYdWHruSsieNeFjjHPu9J53QuKFSutVaTP",
- "nHhVoCUhUTlL4zZWvlCGOLj1nZnGBBsPCKNmxIoNuGJ5xYKxTLMxNWk6QAZzRJGpomVxGtwthHurp+Ls",
- "HxUQlgHX5pPEU9k5qFjexFnb+9epkR36c7mBrYW+Gf4uMkZYrbV74yEQ+wWM0FPXA/dVrTL7hdYWKfND",
- "4JI4wuEfzti7Evc46x19OGq2wYvrtsctfFqnz/8MYdga64ff9fHKqysbOzBH9J0eppKlFH9AXM9D9TiS",
- "aOTr0zKMcvkDwkSH8HWKFouprTvNc0PN7IPbPSTdhFaodpDCANXjzgduOSyU6S3UlNutts9mtGLd4gQT",
- "RpXO7fgNwTiYe5G4Ob1Z0FgVUSNkGJjOGgdwy5auBfGdPe5VnW1hZyeBL7luy2wSeQmyyQHsF6S5pcBg",
- "px0tKjSSAVJtKBNMrf8vVyIyTMVvKLevr5h+9ii53gqs8cv0uhESS0CouNk/g5QVNI9LDlnaN/FmbMXs",
- "wyKVguDlCjeQfbTJUpF7/aPOIXKoOV+Sk2nwfI7bjYxtmGKLHLDFE9tiQRVy8toQVXcxywOu1wqbPx3R",
- "fF3xTEKm18oiVglSC3Wo3tTOqwXoGwBOTrDdkxfkIbrtFNvAI4NFdz9PTp+8QKOr/eMkdgG4h2H2cZMM",
- "2cnfHDuJ0zH6Le0YhnG7UWfRbHn7Mtww49pzmmzXMWcJWzped/gsFZTTFcQjRYoDMNm+uJtoSOvghWf2",
- "WSOlpdgRpuPzg6aGPw1Enxv2Z8EgqSgKpgvn3FGiMPTUPEthJ/XD2TeSXEVhD5f/iD7S0ruIOkrk5zWa",
- "2vsttmr0ZL+hBbTROiXU1v3IWRO94Ouck3NfVghLLNeVlS1uzFxm6SjmYDDDkpSScY2KRaWXyV9IuqaS",
- "pob9zYbATRZfP4+UlW6XN+XHAf7Z8S5BgdzEUS8HyN7LEK4vecgFTwrDUbJHTbZHcCoHnblxt92Q73D/",
- "0GOFMjNKMkhuVYvcaMCp70R4fM+AdyTFej1H0ePRK/vslFnJOHnQyuzQL+9eOymjEDJWK7A57k7ikKAl",
- "gw3G7sU3yYx5x72Q+ahduAv0X9bz4EXOQCzzZzmmCHwrItqpL3VeW9JdrHrEOjB0TM0HQwYLN9SUtMtK",
- "f34+ej9RUHFPlzds9x1b5ovHA/7RRcQXJhfcwMaXb1cyQChBWf0oyWT198DHTsm3YjuWcDqn0BPPPwGK",
- "oiipWJ792mR+dl4tkJSn66jPbGE6/ta8r1Yvzt6B0bJ/a8o55NHhrLz5m5dLI5Lz38XYeQrGR7btPqRg",
- "l9tZXAN4G0wPlJ/QoJfp3EwQYrWdVFcHbecrkRGcp6kx1xzX/gMcQZn0f1SgdCxBCT/YwDG0jRp2YKt0",
- "E+AZaqQz8oN9QnkNpFVACDVBXyminTVdlbmg2RQrWFx+d/aa2FltH/tKkK0SvkJFqL2Kjk0sKJ85LgTZ",
- "P/gTT48YP87+eG2zaqWTuqh3LAHVtGjKjrOOnwBVpBA7M/IqeAzV5qqaIQw9LJksjFZXj2blI6QJ8x+t",
- "abpGta/FWodJfnx5e0+VKnhSsn4aqq4piefOwO0q3NsC91MijG5+w5R9ORc20M55rRPAndnB58C2lycr",
- "zi2lzI645eoKksei3QNnr0jvSohC1kH8kUK/fR3i2Gr/F9grWuKq+3RA7y1Jm0FZP/njX0RPKRecpVhg",
- "KnZFuyd2x/jZRtTi6hpy/RF3JzRyuKIPFtSheA6Lg08YeEboENc39AdfzaZa6rB/anzLdU01WYFWjrNB",
- "NvXvbjhbI+MKXI1QfJA54JNCtnyXyCGj7vCkdpscSUaYejOgPH5vvr1xpgWMSb9mHJUIhzYn+FlrIL4A",
- "qo3mwTRZCVBuPe38Y/Xe9JlhKm4G2w8z/2IojmFdf2bZ1s/dH+rMe72dl9m0fWnaugJJ9c+tKGc76VlZ",
- "ukmHX2WJygN6ywcRHPFeJt59FCC3Hj8cbQ+57Q1XwfvUEBps0NkNJd7DPcKoXyjpvH5lhFZLUdiC2DCx",
- "aJUExiNgvGYcmvdsIxdEGr0ScGPwvA70U6mk2oqAo3jaJdAcPdwxhqa0c2/cdahueSiDElyjn2N4G5vH",
- "VQYYR92gEdwo39XP6BrqDoSJl/h+t0Nk/6kUlKqcEJVh1kLn8ZQY4zCM2z/P1L4A+segLxPZ7lpSe3KO",
- "uYmGElEXVbYCndAsi5Vs/Ra/EvxKsgolB9hCWtWlPcuSpFh3pV2Ipk9tbqJUcFUVe+byDe44XfAaUYQa",
- "wheR/A5jostih//G6loO74wL9Dg61NBHdWTHVV/qh07GpF5D04liq2Q8JvBOuTs6mqlvR+hN/3ul9Fys",
- "2oB85vIT+7hcuEcx/vaduTjC6gy9Yq32aqmLJ2Bgn/BvSKLaWKf9trkSXmW96q3oUKrfqNtvgBh+bW6K",
- "l99AeG9QdIPa+9V6KIeCfNPBmHSqXXacpmQvCxrMOLIRQja3CKGIW2eHooJsUJD53Os9TjLsydk6Xvgw",
- "QKgPN+sD9KOPZSUlZc793jCLPmZd1Hs/D2FMPGyzwd1FuFjyQYvdj5uhuG9fjA2/d1+jugaXMl9K2DBR",
- "ece2j3zyKqH9tfW2Ux15H11/3/CKU31Zc+ig8fbSvQpgl+l08h9/tXFyBLiWu38CU25v03vvXPWlXWue",
- "apqQuqD0qALTrVtxTKHCWE08Jxu2Xto68E5Yj6xejREH+u9+TSfn2VEXZqyu4sSOEjt28Ve8hstONaWm",
- "8IiVQrGmrnvsea+RIYaX+EJXUDarP5aP79lAqrGYfxO3IAGOKaJlJgseDP3/5acG1Ok6EtNVndpXaqpf",
- "wf/AHd/LBgsyGm3189n4wkpndXQa8mmshrwC7t7sbOd5jI42Xy4h1WxzIPvub2vgQWbX1Ntl7NvbQTIe",
- "q6OXsXjL8VbHBqB9yXF74QmKKN4ZnKHcm2vYPVCkRQ3RcuxTf9Xepm4HYgC5Q2JIRKhY9Ic1JDuHPFM1",
- "ZSAWfLSV7Q5NBbTBl5yCXNJbzuVJ0lwcTX7pninjT8mMmst0PSrrGgNxhxL0+i9RDOsfr/DhD1W/sujr",
- "foRaOjnvV0e8cXVDMFey9p34CiKg/G8+MdrOkrNrCN+aQk/VDZWZbxE1vXirTrLnPupl1flXFLpAL+uZ",
- "WRMb28+jitTbwgjoNBdGjEiGwsjb4ah1LMcDZYNubPl3DLQ1cC1Bujf5UP7NhYJECx9Luw+OfaiwkUW3",
- "QoIarHFpgRusPPOuKa2DtX4pVpqhLqAoXCCRUFADnQwK4AzPuQ/ZL+13nzjka70etDDV9Hr40QEfFc1U",
- "D4kh1S+Juy0PJyTdxtjEOLfvPqtYNRwOsu0NKaXIqtRe0OHBqA1yo2tN7WElUTtN2l9lR0cIsjqvYTe3",
- "SpB/rcHvYAi0lZws6EEVhc4m36v5TcXgXt0LeF/ScjWdlELkyYCz47xfwqdL8dcsvYaMmJvCRw8OvHxD",
- "HqKNvfZm36x3vmRNWQKH7NGMkDNu47W9Y7tdQ7ozOX+g982/xVmzylbVcka12RWPB75ivSt5R27mh9nP",
- "wxQYVnfHqewgBwrEbAfKB0l6E3kHajZWK++7mrtv8zREZaGIySTNszMH4mTqEJnm5Y8mTKYvHeS5uEmQ",
- "ipK6/ldM5zDt2kzSVzxtuhlsLyCIt6HKXaA7sqYZSYWUkIY94ikOFqhCSEhygeE3Mc/gUht5qMC4Zk5y",
- "sSKiNGquLaPnfSjRZ2mCuWyare2ZWEfNQCEDUC6t1k1jG/fn2fN6zfEv41yuI/YWRLTH8tHP3zhCOfrV",
- "igDMEQR62NZ0Fnvdp72u7vtQQ6+1aVGwNI7uf60ok8HYkANvF0XWV5Oje1rJZwUO4Crqst3vIbXv0C3G",
- "+knrmskjj0UAwLDntAXDKP/psWAs8V3HhEaQfF5LrdPWs7usc/Z9PTtL4ym1WusaiBm7kuCy1OwDdJ2X",
- "c0qq1/4WM837uqXRU0BhCpl9/oMqawnxFhn3+l1XPBBlksMGWg5llzpXpSkoxTYQvpxnO5MMoET7ZFdq",
- "jnlKQy7XEaXc2pPA1zYGu1HZyiLW7hQ5IDhFxbwtT+wxUWOPkoFow7KKtvCn7vAW2dAzZBE27GEdySmO",
- "ZhLxxe1jEQdjG5Dmo+eSx0MbwszN2iiCs2W18dQSYXOyVUlv+LASEbE71f72u6+D4GBEdTKpB698We/K",
- "bRXIQcrYRxi99wOjMocC//5rWPTEi1uub0TGsqYupiIDMNWcZ4zegyY6LGhW0B3J2HIJ0hrzlaY8ozIL",
- "mzNOUpCaMqPZ7NTtxVoDraxgelCyNdwVB/UMJibjol3KApLvnMpwB6kTPTcRidNetVoMPZHY25V4OgHd",
- "Guka46oGiMAlQqNsbQ+Y4CggkYJew5HzKPYH7J8Gy5M4258WOOuYKWK+1lvWVhvFuvthCJHbLXgMcb9n",
- "KCy92OR0SRvNgpZkf0F2afyn5uIc9yyj73AAvNBhGDzM6G03DpwvnBz1U42UYCkfhiihtfxDPki3wEbS",
- "CLbIMQKtwRbCtQH17X0JHMzqZe23HXpDtOvexTqLgttH/npuYcub7Kt9AeGYsyA3NP/8rl0swHmG+IDs",
- "3bAxOPQNhki2qFS3y0x4TUfNHfgB729q/hZd0X8Ds0dRrdQN5USYWqz3wTx4s9DcGi6W/gmvDXByg2Pa",
- "OLYnX5OFy9wuJaRMdUWjG/+6Ru0Kw8emXDbIVh/wvR1a569C34GMl17TIG+aSv2o4694A2FzRL8wUxk4",
- "uVEqj1Ffjywi+IvxqLCE2oHr4roV4GZfPulkbggJ9xzoFoSsHxno1i8ON3Z5NpjLXDqVgv46R9/WLdxG",
- "LupmbWOjNPvI3VfOfUxwZfyVBtMdozstQvCJE4Kgkt+f/E4kLPENQ0EeP8YJHj+euqa/P21/Nsf58eOo",
- "dPbZ4jotjtwYbt4Yxfw6lOlns9kGkko7+1GxPDtEGK0U4eYVUEyC/c0VIvgi75D+ZmNN+kfVvQV3hwA5",
- "i5jIWluTB1MFyb8j8n5dt0iWL/px0koyvcP6iN5+wH6LRqD+UEczuWi4Wj90d58W11BX2Gxinyrlb9cf",
- "BM3xPrJqKze3kMhn5LstLcoc3EH55sHiP+DZX55nJ8+e/MfiLydfnaTw/KsXJyf0xXP65MWzJ/D0L189",
- "P4Eny69fLJ5mT58/XTx/+vzrr16kz54/WTz/+sV/PDB8yIBsAZ34ajyT/42P9SZnb8+TSwNsgxNash9h",
- "Z98FNGTsXxykKZ5EKCjLJ6f+p//pT9gsFUUzvP914op9TNZal+p0Pr+5uZmFXeYrDHZItKjS9dzP03uS",
- "8Oztee0lslYg3FGbJ+ute54UzvDbu+8uLsnZ2/NZ8F796eRkdjJ7gs+bl8BpySank2f4E56eNe773BHb",
- "5PTjp+lkvgaaY2yg+aMALVnqP0mg2c79X93Q1QrkzD3DaH7aPJ17sWL+0QV9fNr3bR6+aDL/2IqNyQ70",
- "xBcP5h99Ib/9rVuV8lxMUNBhJBT7ms0XWB9kbFNQQePhpaCyoeYfUVwe/H3uChrEP6LaYs/D3AeQxVu2",
- "sPRRbw2snR4p1em6Kucf8T9InwFYNn1orrd8jraP+cfWatzn3mravzfdwxabQmTgARbLpS1Muu/z/KP9",
- "N5gItiVIZgQ/G7Ln7Dz1sTrPJqeT74JGL9eQXuNbHtbIh+fl6clJJLcy6EXs8aWLHDJz9p6fPB/RgQsd",
- "dnJV5/odf+HXXNxwgpk4lpdXRUHlDmUkXUmuyM8/ErYk0J2CKT8D8g+6UujwxocDJtNJCz0fPjmk2cjz",
- "OVZT2jW49D/veBr9sb/N3UfTYj/PP7aL9rfoR60rnYmboC9qU9YU0J+vfsaq9ff8hjJt5CMXwolFFfud",
- "NdB87vK1O782KVK9L5j3FfwYuiKiv87rmrXRj11OFfvqTupAI28Z9Z8bqSWUAian74P7//2HTx/MN2la",
- "46fmUjudzzEsai2Unk8+TT92Lrzw44eaxnwZm0kp2Qaz4j58+n8BAAD//229Af14wQAA",
+ "H4sIAAAAAAAC/+x9/XPcNrLgv4Ka96qc+IaSv5K3UdXWO8VOsro4iStWsvee7ctiyJ4ZrDgAFwClmfj8",
+ "v1+hGyBBEpzhSIq9W3U/2Rrio9FoNLob/fF+lqtNpSRIa2Zn72cV13wDFjT+xfNc1dJmonB/FWByLSor",
+ "lJydhW/MWC3kajafCfdrxe16Np9JvoG2jes/n2n4Ry00FLMzq2uYz0y+hg13A9td5Vo3I22zlcr8EOc0",
+ "xMWL2Yc9H3hRaDBmCOVPstwxIfOyLoBZzaXhuftk2I2wa2bXwjDfmQnJlASmlsyuO43ZUkBZmJOwyH/U",
+ "oHfRKv3k40v60IKYaVXCEM7narMQEgJU0ADVbAizihWwxEZrbpmbwcEaGlrFDHCdr9lS6QOgEhAxvCDr",
+ "zezszcyALEDjbuUgrvG/Sw3wO2SW6xXY2bt5anFLCzqzYpNY2oXHvgZTl9YwbItrXIlrkMz1OmE/1May",
+ "BTAu2c/fPmdPnz79yi1kw62FwhPZ6Kra2eM1UffZ2azgFsLnIa3xcqU0l0XWtP/52+c4/2u/wKmtuDGQ",
+ "Pizn7gu7eDG2gNAxQUJCWljhPnSo3/VIHIr25wUslYaJe0KN73VT4vk/6a7k3ObrSglpE/vC8Cujz0ke",
+ "FnXfx8MaADrtK4cp7QZ98yj76t37x/PHjz7825vz7L/9n188/TBx+c+bcQ9gINkwr7UGme+ylQaOp2XN",
+ "5RAfP3t6MGtVlwVb82vcfL5BVu/7MteXWOc1L2tHJyLX6rxcKcO4J6MClrwuLQsTs1qWjk250Ty1M2FY",
+ "pdW1KKCYO+57sxb5muXc0BDYjt2IsnQ0WBsoxmgtvbo9h+lDjBIH163wgQv650VGu64DmIAtcoMsL5WB",
+ "zKoD11O4cbgsWHyhtHeVOe6yYpdrYDi5+0CXLeJOOpouyx2zuK8F44ZxFq6mORNLtlM1u8HNKcUV9ver",
+ "cVjbMIc03JzOPeoO7xj6BshIIG+hVAlcIvLCuRuiTC7FqtZg2M0a7NrfeRpMpaQBphZ/h9y6bf9fr3/6",
+ "kSnNfgBj+Ape8fyKgcxVAcUJu1gyqWxEGp6WEIeu59g6PFypS/7vRjma2JhVxfOr9I1eio1IrOoHvhWb",
+ "esNkvVmAdlsarhCrmAZbazkGEI14gBQ3fDuc9FLXMsf9b6ftyHKO2oSpSr5DhG349s+P5h4cw3hZsgpk",
+ "IeSK2a0clePc3IfBy7SqZTFBzLFuT6OL1VSQi6WAgjWj7IHET3MIHiGPg6cVviJwwiCj4DSzHABHwjZB",
+ "M+50uy+s4iuISOaE/eKZG3616gpkQ+hsscNPlYZroWrTdBqBEafeL4FLZSGrNCxFgsZee3Q4BkNtPAfe",
+ "eBkoV9JyIaFwzBmBVhaIWY3CFE24X98Z3uILbuDLZ2N3fPt14u4vVX/X9+74pN3GRhkdycTV6b76A5uW",
+ "rDr9J+iH8dxGrDL6ebCRYnXpbpulKPEm+rvbv4CG2iAT6CAi3E1GrCS3tYazt/Kh+4tl7LXlsuC6cL9s",
+ "6Kcf6tKK12Llfirpp5dqJfLXYjWCzAbWpMKF3Tb0jxsvzY7tNqlXvFTqqq7iBeUdxXWxYxcvxjaZxjyW",
+ "MM8bbTdWPC63QRk5tofdNhs5AuQo7iruGl7BToODludL/Ge7RHriS/27+6eqStfbVssUah0d+ysZzQfe",
+ "rHBeVaXIuUPiz/6z++qYAJAiwdsWp3ihnr2PQKy0qkBbQYPyqspKlfMyM5ZbHOnfNSxnZ7N/O23tL6fU",
+ "3ZxGk790vV5jJyeykhiU8ao6YoxXTvQxe5iFY9D4CdkEsT0UmoSkTXSkJBwLLuGaS3vSqiwdftAc4Dd+",
+ "phbfJO0Qvnsq2CjCGTVcgCEJmBo+MCxCPUO0MkQrCqSrUi2aHz47r6oWg/j9vKoIHyg9gkDBDLbCWPM5",
+ "Lp+3Jyme5+LFCfsuHhtFcSXLnbscSNRwd8PS31r+FmtsS34N7YgPDMPtVPrEbU1AgxPz74PiUK1Yq9JJ",
+ "PQdpxTX+i28bk5n7fVLnfw0Si3E7TlyoaHnMkY6Dv0TKzWc9yhkSjjf3nLDzft/bkY0bJU0wt6KVvftJ",
+ "4+7BY4PCG80rAtB/obtUSFTSqBHBekduOpHRJWGOznBEawjVrc/awfOQhARJoQfD16XKr/7Czfoezvwi",
+ "jDU8fjgNWwMvQLM1N+uTWUrKiI9XO9qUI+YaooLPFtFUJ80S72t5B5ZWcMujpXl402IJoR77IdMDndBd",
+ "fsL/8JK5z+5sO9ZPw56wS2Rgho6zf2QonLZPCgLN5BqgFUKxDSn4zGndR0H5vJ08vU+T9ugbsin4HfKL",
+ "wB1S23s/Bl+rbQqGr9V2cATUFsx90IcbB8VICxszAb4XHjKF++/Rx7XmuyGScewpSHYLdKKrwdMg4xvf",
+ "zdIaZ88XSt+O+/TYimStyZlxN2rEfOc9JGHTuso8KSbMVtSgN1D7yrefafSHT2Gsg4XXlv8BWDBu1PvA",
+ "Qneg+8aC2lSihHsg/XWS6S+4gadP2Ou/nH/x+MlvT7740pFkpdVK8w1b7CwY9pnXzZixuxI+H64MtaO6",
+ "tOnRv3wWDJXdcVPjGFXrHDa8Gg5FBlASgagZc+2GWOuiGVfdADjlcF6C4+SEdka2fQfaC2GchLVZ3Mtm",
+ "jCGsaGcpmIekgIPEdOzy2ml28RL1Ttf3ocqC1kon7Gt4xKzKVZldgzZCJV5TXvkWzLcI4m3V/52gZTfc",
+ "MDc3mn5riQJFgrLsVk7n+zT05Va2uNnL+Wm9idX5eafsSxf5wZJoWAU6s1vJCljUq44mtNRqwzgrsCPe",
+ "0d+BRVHgUmzgteWb6qfl8n5URYUDJVQ2sQHjZmLUwsn1BnIlyRPigHbmR52Cnj5igonOjgPgMfJ6J3O0",
+ "M97HsR1XXDdC4qOH2ck80mIdjCUUqw5Z3l1bHUMHTfXAJMBx6HiJn9HQ8QJKy79V+rK1BH6nVV3du5DX",
+ "n3PqcrhfjDelFK5v0KGFXJVd75uVg/0ktcZPsqDn4fj6NSD0SJEvxWptI7XilVZqef8wpmZJAYofSCkr",
+ "XZ+havajKhwzsbW5BxGsHazlcI5uY77GF6q2jDOpCsDNr01aOBvx18CHYnzftrG8Z9ekZy3AUVfOa7fa",
+ "umL4eju4L9qOGc/phGaIGjPydtU8OlIrmo58AUoNvNixBYBkauEfiPzTFS6S49OzDeKNFw0T/KIDV6VV",
+ "DsZAkXnD1EHQQju6OuwePCHgCHAzCzOKLbm+M7BX1wfhvIJdho4Shn32/a/m808Ar1WWlwcQi21S6G3U",
+ "fP8KOIR62vT7CK4/eUx2XAML9wqzCqXZEiyMofAonIzuXx+iwS7eHS3XoPE97g+l+DDJ3QioAfUPpve7",
+ "QltXI+5/Xr11Ep7bMMmlCoJVarCSG5sdYsuuUUcHdyuIOGGKE+PAI4LXS24svSELWaDpi64TnIeEMDfF",
+ "OMCjaogb+deggQzHzt09KE1tGnXE1FWltIUitQYJ2z1z/QjbZi61jMZudB6rWG3g0MhjWIrG98iilRCC",
+ "uG2eWryTxXBx+CDh7vldEpUdIFpE7APkdWgVYTd2gRoBRJgW0UQ4wvQop/G7ms+MVVXluIXNatn0G0PT",
+ "a2p9bn9p2w6Ji9v23i4UGPS88u095DeEWXJ+W3PDPBxsw6+c7IFmEHrsHsLsDmNmhMwh20f5qOK5VvER",
+ "OHhI62qleQFZASXfDQf9hT4z+rxvANzxVt1VFjLyYkpvekvJwWlkz9AKxzMp4ZHhF5a7I+hUgZZAfO8D",
+ "IxeAY6eYk6ejB81QOFdyi8J4uGza6sSIeBteK+t23NMDguw5+hSAR/DQDH17VGDnrNU9+1P8Fxg/QSNH",
+ "HD/JDszYEtrxj1rAiA3VO4hH56XH3nscOMk2R9nYAT4ydmRHDLqvuLYiFxXqOt/D7t5Vv/4EyWdGVoDl",
+ "ooSCRR9IDazi/oz8b/pj3k4VnGR7G4I/ML4lllMKgyJPF/gr2KHO/YocOyNTx33osolR3f3EJUNAg7uY",
+ "E8HjJrDluS13TlCza9ixG9DATL3YCGvJYbur6lpVZfEAyXeNPTP6Rzxyigw7MOVV8TUOFS1vuBXzGekE",
+ "++G77CkGHXR4XaBSqpxgIRsgIwnBJH8PVim368L7jgfv4UBJHSA908YX3Ob6f2A6aMYVsP9SNcu5RJWr",
+ "ttDINEqjoIACpJvBiWDNnN6zo8UQlLAB0iTxy8OH/YU/fOj3XBi2hJsQcOEa9tHx8CHacV4pYzuH6x7s",
+ "oe64XSSuD3zwcRef10L6POWwZ4EfecpOvuoN3rwSuTNljCdct/w7M4DeydxOWXtMI9O8KnDcSW850dCp",
+ "deO+vxabuuT2Pl6t4JqXmboGrUUBBzm5n1go+c01L39qumEwCeSORnPIcgyBmDgWXLo+FDVxSDdsvcnE",
+ "ZgOF4BbKHas05EBe/k7kMw2MJ4z8//I1lyuU9LWqV94BjcZBTl0bsqnoWg6GSEpDdisztE6nOLd3Og6B",
+ "Hk4OAu50sb5pmzSPG97M52N7plypEfL6pv7k69Z8NqqqOqRet6oqIacbrTKBi3cEtQg/7cQT30AQdU5o",
+ "GeIr3hZ3Ctzm/jG29nboFJTDiSOXuPbjmFec05PL3T1IKzQQ01BpMHi3xPYlQ1/VMo5M85eP2RkLm6EJ",
+ "nrr+NnL8fh5V9JQshYRsoyTsksHYQsIP+DF5nPB+G+mMksZY377y0IG/B1Z3ninUeFf84m73T2j/qcl8",
+ "q/R9vWXSgJPl8glPhwffyf2Ut33g5GWZeBP0cSt9BmDmTZy80Iwbo3KBwtZFYeZ00Pwzog9y6aL/VeON",
+ "ew9nrz9u7/ErDolE4y6UFeMsLwWafpU0Vte5fSs5GpeipSa8loIWPW5ufB6apO2bCfOjH+qt5Oix1pic",
+ "kp4WS0jYV74FCFZHU69WYGxPSVkCvJW+lZCslsLiXBt3XDI6LxVodB06oZYbvmNLRxNWsd9BK7aobVds",
+ "x7AsY0VZ+pc4Nw1Ty7eSW1YCN5b9IOTlFocLr/XhyEqwN0pfNVhI3+4rkGCEydLeVd/RV3R89ctfeydY",
+ "DKOnz/R248ZvY7d2aHtqQ8P/z2f/efbmPPtvnv3+KPvqf5y+e//sw+cPBz8++fDnP//f7k9PP/z58//8",
+ "99ROBdhTQUMe8osXXqW9eIF6S/t4M4D9oxnuN0JmSSKL3TB6tMU+wwBZT0Cfd61adg1vpd1KR0jXvBSF",
+ "4y23IYf+DTM4i3Q6elTT2YieFSus9Uht4A5chiWYTI813lqKGjokpsPz8DXRR9zheVnWkrYySN8UfRIc",
+ "w9Ry3oRgUnaWM4bxeWsevBr9n0+++HI2b+Pqmu+z+cx/fZegZFFsU9GTBWxTSp4/IHgwHhhW8Z0Bm+Ye",
+ "CHvSB46cMuJhN7BZgDZrUX18TmGsWKQ5XPDp98airbyQ5Gzvzg++Te78k4dafny4rQYooLLrVNaGjqCG",
+ "rdrdBOj5i1RaXYOcM3ECJ31jTeH0Re+NVwJfYvYA1D7VFG2oOQdEaIEqIqzHC5lkEUnRD4o8nlt/mM/8",
+ "5W/uXR3yA6fg6s/ZPESGv61iD7775pKdeoZpHlAgLw0dhV4mVGkfXdTxJHLcjHLVkJD3Vr6VL2AppHDf",
+ "z97Kglt+uuBG5Oa0NqC/5iWXOZysFDsLAUsvuOVv5UDSGk0nFYWKsapelCJnV7FC0pInpQgZjvD27Rte",
+ "rtTbt+8GThVD9cFPleQvNEHmBGFV28wnOMg03HCderQyTYA7jkwZTPbNSkK2qsmyGRIo+PHTPI9XlekH",
+ "ug6XX1WlW35EhsaHcbotY8YqHWQRJ6AQNLi/Pyp/MWh+E+wqtQHD/rbh1Rsh7TuWva0fPXoKrBP5+Td/",
+ "5Tua3FUw2boyGojbN6rgwkmthK3VPKv4KvU29vbtGwu8wt1HeXmDNo6yZNitE3EaPOpxqHYBAR/jG0Bw",
+ "HB09h4t7Tb1CMqv0EvATbiG2ceJG+2J/2/2KYlBvvV29ONbBLtV2nbmznVyVcSQedqbJcbNyQlZwozBi",
+ "hdqqTwe0AJavIb/yeVpgU9ndvNM9eOp4QTOwDmEogw9FkGEOCXxZWACrq4J7UZzLXT+Y34C1wR/4Z7iC",
+ "3aVqU1AcE73fDSY3YwcVKTWSLh2xxsfWj9HffO8Ohop9VYWYbAzOC2Rx1tBF6DN+kEnkvYdDnCKKTrDz",
+ "GCK4TiCCiH8EBbdYqBvvTqSfWp7TMhZ08yWy+QTez3yTVnnynlvxatDqTt83gOnA1I1hC+7kduUzWVHA",
+ "dMTFasNXMCIhx487E8OSOw9COMihey9506ll/0Ib3DdJkKlx5tacpBRwXxypoDLT89cLM9H7oX+ZwASV",
+ "HmGLEsWkxrGRmA7XnUc2yrg3BlqagEHLVuAIYHQxEks2a25Cki3MRRbO8iQZ4A9MALAv7ctF5GoWJRxr",
+ "kroEnts/pwPt0id/CRlfQpqXWLWckLLFSfjo3Z7aDiVRACqghBUtnBoHQmmTEbQb5OD4abkshQSWpbzW",
+ "IjNodM34OcDJxw8ZIws8mzxCiowjsPFdHAdmP6r4bMrVMUBKn0yBh7HxRT36G9JxX+TH7UQeVTkWLkZe",
+ "tfLAAbh3dWzur57DLQ7DhJwzx+aueenYnNf42kEG2UdQbO3lGvGeGZ+PibN7HkDoYjlqTXQV3WY1scwU",
+ "gE4LdHsgXqhtRoGfSYl3sV04ek+6tmMYaupgUp6XB4Yt1Ba9ffBqIVfqA7CMwxHAiDT8rTBIr9hv7DYn",
+ "YPZNu1+aSlGhQZLx5ryGXMbEiSlTj0gwY+TyWZS65VYA9IwdbR5kr/weVFK74snwMm9vtXmbkixEDaWO",
+ "/9gRSu7SCP6GVpgm2cqrvsSStFN0nVa6eWYiETJF9I5NDB9phk9BBkpApSDrCFHZVerl1Ok2gDfO69At",
+ "Ml5gNhsud59HnlAaVsJYaI3owU/iU5gnOSbRU2o5vjpb6aVb389KNdcUPSNix84yP/oK0JV4KbSxGb5A",
+ "JJfgGn1rUKn+1jVNy0pdXytKOSuKNG/Aaa9glxWirNP06uf9/oWb9seGJZp6gfxWSHJYWWCK5KQH5p6p",
+ "yUl374Jf0oJf8ntb77TT4Jq6ibUjl+4c/yLnosd597GDBAGmiGO4a6Mo3cMgo8jZIXeM5Kbojf9kn/V1",
+ "cJiKMPZBr50Qvzt2R9FIybVEBoO9qxD4TOTEEmGjDMPDkNaRM8CrShTbni2URh3VmPlRBo+Ql62HBdxd",
+ "P9gBDER2z1RUjQbTTcHXCviUK7qTAedkEmYuu4nyYoYQTyVMqHQwRFQTdXcIV5fAy+9h96tri8uZfZjP",
+ "7mY6TeHaj3gA16+a7U3iGZ/myZTWeQk5EuW8qrS65mXmDcxjpKnVtSdNbB7s0R+Z1aXNmJffnL985cH/",
+ "MJ/lJXCdNaLC6KqwXfUvsyrK9jdyQEImdafzBZmdRMlo85sUZbFR+mYNPiV1JI0Ocme2Dw7RUfRG6mXa",
+ "Q+igydm/jdAS97yRQNU8kbTmO3oh6b6K8GsuymA3C9COePPg4qYlYE1yhXiAO7+uRI9k2b2ym8HpTp+O",
+ "lroO8KR4rj1JszeUF94wJftP6OjzvKv8q/uGY+ZLsooMmZOsN2hJyEwp8rSNVS6MIw5Jb2euMcPGI8Ko",
+ "G7EWI0+xshbRWK7ZlNw2PSCjOZLINMn0Oi3uFsrX/Kml+EcNTBQgrfuk8VT2DiqmSfHW9uF16mSH4Vx+",
+ "YLLQt8PfRcaIs772bzwEYr+AEb/UDcB90ajMYaGNRcr9ED1JHPHgH884uBL3PNZ7+vDUTM6L6+6LW1yi",
+ "Z8j/HGFQrvbD9YGC8urTz47Mkaz3I0y21Op3SOt5qB4nApZCnluBXi6/QxzoEFe56LCYxrrTli1qZx/d",
+ "7jHpJrZCdZ0URqgedz56lsOEm8FCzSVtNQWSdHzd0gQTe5We0vgtwXiYB564Jb9Z8FQ2UidkOJjO2wfg",
+ "ji3dKhY6B9ybJtqCZmfRW3LTVlAwegW6jSUcJra5pcBA004WFVrJAKk2lgnm9P5XGpUYppY3XFIVF9eP",
+ "jpLvbYCMX67XjdKYSsKkzf4F5GLDy7TkUORDE28hVoIKlNQGogoYfiAq/kRU5KuINDFEHjUXS/ZoHpXh",
+ "8btRiGthxKIEbPGYWiy4QU7eGKKaLm55IO3aYPMnE5qva1loKOzaEGKNYo1Qh+pN83i1AHsDINkjbPf4",
+ "K/YZPtsZcQ2fOyz6+3l29vgrNLrSH49SF4AvMLOPmxTITv7q2UmajvHdksZwjNuPepKMuqcKc+OMa89p",
+ "oq5TzhK29Lzu8FnacMlXkPYU2RyAifribqIhrYcXWVB5JGO12jFh0/OD5Y4/jXifO/ZHYLBcbTbCbvzj",
+ "jlEbR09teQuaNAxHtZZ8ZuIAV/iIb6RVeCLqKZEf12hK91tq1fiS/SPfQBetc8Ypf0gpWu+FkC+dXYT0",
+ "RJiqucnQTLhxc7mlo5iDzgxLVmkhLSoWtV1mf2L5mmueO/Z3MgZutvjyWSI9dTdNqjwO8I+Odw0G9HUa",
+ "9XqE7IMM4fuyz6SS2cZxlOLzNtojOpWjj7npZ7uxt8P9Q08Vytwo2Si51R1y4xGnvhPhyT0D3pEUm/Uc",
+ "RY9Hr+yjU2at0+TBa7dDv/z80ksZG6VTOQfb4+4lDg1WC7hG3730Jrkx77gXupy0C3eB/tO+PASRMxLL",
+ "wllOKgLXm1+DWXbUZ9+J8L/+4MspDmTvET8DciRo+nzkWISkSxJJaOjGx3DV7G+P/8Y0LH2BxIcPEeiH",
+ "D+demPvbk+5nYlIPH6Yz8SRtGu7XFgtHscJ+pgLXN7WHX6uEhSGkvW9eQ3y8QcLCM8Zq3Qd3lBd+qDnr",
+ "phj/+Hfh/XiypV8r06fg7ds3+CXgAf/oI+ITH3ncwNYfg1YyQihRiYUkyRTN98hPgrOv1XYq4fQ4aSCe",
+ "fwIUJVFSi7L4tY3e7bE2zWW+Tr57LlzH39pae83i6PAmU0CuuZRQJocjneG3oFsktJ+/q6nzbISc2LZf",
+ "VIOW21tcC3gXzABUmNChV9jSTRBjtRsY2TjelytVMJynzTfYHtdhMZYoZf4/ajA2dWHhB3L+Q/u2YweU",
+ "sZ2BLNCqcMK+o3Laa2CdZFKozYdsH93I97oqFS/mmIXk8pvzl4xmpT5UMYoyxq9Qme2uomfXjFKpTnMj",
+ "D8Wf0iEu08fZ73PvVm1s1iR4TwURuxZtCnrRe+tBNTfGzgl7ERXGpXhjNwTDJDR64zTzZjSScZEm3H+s",
+ "5fkaVfcOax0n+emlDgJVmqi8aFMmrMkviufOwe2rHVCxgzlTdg36RhiqogzX0I1bboL4vekoxDF3l6dr",
+ "KYlSTo645ZpsoseiPQBHV2R4DkpC1kP8kYobVQo5tvLDa+yVTHfWLyMxqCtKUbBN+adQHT/nUkmRY7Kx",
+ "1BXtyy1PeSudkJetb4wPR9yf0MThShavaNwpPRZHy1kERugRN3ysib66TSXqoD8t1vVdc8tWYI3nbFDM",
+ "Qw0Wby8W0oDPF4vFuSM+qXTn/Rk5ZNKlIWuevo4kIwyfGjEAfOu+/ejNQxhXcCUkKoIebV7wI4suVoO1",
+ "TnsUlq0UGL+ebgy5eeP6nGA4dQHbdyeheiyOQc+3btnkqzAc6jx4LnhPAdf2uWvrk1w1P3c81WnS86ry",
+ "k45X6EnKA3YrRxGceIHOwhNghNxm/Hi0PeS21+UI71NHaHCNDgtQ4T08IIymWk2vEpoTWomisAUjV79k",
+ "pgshE2C8FBLa2saJCyJPXgm4MXheR/qZXHNLIuAknnYJvCSFOsHQjPVPVHcdqp/iy6EE1xjmGN/GttDO",
+ "CONoGrSCG5e7pqSyo+5ImHiOtdw9Iodlc1Cq8kJUgZEnvUI6KcbhGHco1dW9AEb0/I5MRN0x392xN9FY",
+ "MPGiLlZgM14UqfS9X+NXhl9ZUaPkAFvI6ybNa1WxHHPndJMJDanNT5QraerNnrlCgztOF1WmSlBDXB0r",
+ "7DAGKy12+G8qx+n4znhnnaPdRYNnTnFcBq2h+2tK6nU0nRmxyqZjAu+Uu6Ojnfp2hN72v1dKL9WqC8in",
+ "MNuNcLl4j1L87Rt3ccQZNgaJe+lqaRJgoHOmCvVEUW1sQre7XAmvskEmX3wUbOoV7jdAjFcenOPlN+Ki",
+ "HRth6X4lw+SYo3Y+GlfArY9wtJztZUGjUWPk5dUz6w4t7GOeXeTYdX/mUL/WvQgNLoNDgL4P/sis4sK7",
+ "ULTMYohZH7kwjCWZ4tPcbnB/ET4eYNRi9/31mO9+SKiH3/uVya7Apz2oNFwLVQfnhOC9FlRC+rVT56uJ",
+ "nkiuf2h4xak+rTl01Hh76StE0DK9Tv79r+TryEBavfsnMOUONn1Q82wo7ZJ5qm3CmuTik5KNd27FKckm",
+ "U3kNvWzYqbp2oGbcgKxeTBEHhjXg5rOL4qgLM5Ubc0ajpI5duqLbeOqwNl0YHrFKGdHm+E+VepvoJnqJ",
+ "1dqi1GfDsYKP1jXkFgs7tL4nGuCYRGhusqh47P9PITaiTjfetD5z2L50YcNqDgfu+EFEXxSVSpnwT6Yn",
+ "xzpvPAyRT2NG6xVIX7+1G6szOWJguYTciusDEZR/XYOMovPmwS5DddijgErReKBjAp7jrY4tQPsCHPfC",
+ "EyXCvDM4Y/FTV7B7YFiHGpKp+efhqr1N7hXEAHKHzJGIMikPHjIke6cKYRrKQCwEjznqDm0Wu9GqXlE8",
+ "8C3nCiTpLo42RnjPlOmyQpPmcl2PipxHZ+qxIMthVZJx/eMFFoExTcXNkLsl1tLZxTDD5Y3P/YLxrs3b",
+ "ScgCAyb8FoLbaZZSXEFcdwxfqm64LkKLpOklWHWyPffRIDIyVNToA71sZhatf/MwFi6RMw292PNSOTEi",
+ "GwsF6LoUN/44Dww5TlEKf3SWdnAtQfv6jCj/lspAZlXwh94Hxz5UkHfYrZBgRvOUEnCj2YN+btMjYb5m",
+ "jtmCuHcKixfINGy4g05HSYzG59yH7Of0PQR/hXy9By1MDb0eLhwRPNuFGSAxpvol87fl4aCy2xibhJRU",
+ "A9ykMhpJ0N3XkEqros7pgo4PRmOQm5wvbA8rSdpp8uEqezpCFJl7BbtTUoJCxY2wgzHQJDkR6FEmjN4m",
+ "36v5zaTgXt0LeJ/ScjWfVUqV2chjx8UwDVOf4q9EfgUFczdF8AAdqYLEPkMbe/OafbPehbRDVQUSis9P",
+ "GDuX5HMfHra7ecB7k8sHdt/8W5y1qCkzmjeqnbyVaedlzFmm78jNwjD7eZgBx+ruOBUNciDJz3YkBZTm",
+ "N4maYCdTtfLhU3O/TlNLVARFSiZ5TS9Wz/GgpwxHN1pY8I4NdIm7jWT+pYuZUqWcBOFmWvx+41DqdqRU",
+ "Ixd3PBkCZEFOifNsoPCDJxHQ1GA64CjU+Ai15WtaP6GheFSW6ibDY5Q1SexSSpdr170lQtretpsjtwVE",
+ "DkfceAlix9a8YLnSGvK4RzpOh4DaKA1ZqdD/KPU0urROINygc75kpVoxVTk9n3JBhkekZG2laK77qiNF",
+ "MecEQUYvXiNZPcD4GHMPLjUewrunlNPxZaIu1wnDFW5Y2K2ja0F5gju6hEsE5gRCP2y0O0+Vuuquq190",
+ "bawEolUbkafR/a/lrjPqZJOi3hQqfBZliuLEZnjAY57SvM7i6RmiGSRflEle7Y+ff6VCOnf/xSu8Py5b",
+ "gmcuI/wsUbOZ2HCWj14WPQAQUgotsrWm1MsxK28KuqkVhSLiG1sf0IkMB10Z7gabG+E+gfqwn1BSFd8S",
+ "B6HZHV+QLsRSjxyqpJPEfp8EqgK6mOqZ0GSan8g/IwDGfRU6MEzyWDgWjCVW1c14AskXjZ447xQ9F71L",
+ "ImQBJWaYc7ITrYG5sWsNPraXyn/26o1V3K6D3OiaD605soAtGAy8paJJ3JDtMdhAfe3RvkCuqqyEa+i4",
+ "cPiA4zrPwRhxDXHdUurMCoAKXwT6emrKNyG+DnvKi197Fr1uT8FuUpshxNJOsQOqSlKx2sqMjomZepQc",
+ "RNeiqHkHf+YOFRzHijcm7usA67tpnOJoJpFe3D4WcdCbCGk+eS5l2pkojndvzJA4W9E8VxARtifbVPxG",
+ "jqvtQ6Jsxc3ptU8jxH6zhRyv7q63zN1xwnAwZnq5LEblTN3s8G3NP6NUto/IBpVg03oYhErecdqpoCv4",
+ "vomrkQzVwiQGEKblDeh7C61vZ9Rsw3esEMslaHqKM5bLgusibi4ky0FbLiS74Ttze53MQatrmB9Uyxyn",
+ "xkEDs0opaGhVJkDKnVf4x1SmCaoOvrsm1By6tq0aK1I72JV0MBDfOtUQvSJHiMCnokDFkA6rkiiVsw2/",
+ "giPnMeJ32D8NJojylnurcNYpU3zYS+s/IerwwP8ihd1L7STv9d1U6R2RiDHQoFy1zgy0OUMaTHkWX1Kp",
+ "tNi7uF95JOw1GTVpPhjJpNoV00d2Ec063i09lsnNdHW1YzlK+S8TD8+Qt5s97gpgolptuTc3D8WSwaVA",
+ "SJl77+8jpRZSF3hRiLHS+Gvw6cr92epO25gA3TjTLd2RvSsNUaWqLJ/yhlVACY7VkNbiIe3COMFGVuUH",
+ "roXkJTnClboqkloif8BjQaIBevs0F+K874fWFQKag4d1l/Naoxh7w3eHU2K2gkDahZ9GDjp48ExqoPYb",
+ "TEfcUCmfZMbJYwTEBNdJVbMZ5vq7/8VQbEr7ev7HLce/j6UXcC69ooQ1CvfRW6tKBVJJ0BqXuxTTCC9A",
+ "t1jgmHw4wbv63raqOS1/xAYlL8nbpYCeBNrQ0zaBzahm+37npzhDfJu2QJPDNjpLBI20zy9+aDXVadXj",
+ "Q4cD4MU+cVH9+PA86cH5xPH/PzRIiZbybowSOss/5GbnF9iq9tEWeWnZWqB6HRQz2t2XyIfSPG9cE0eu",
+ "5oEHI6aDd+JZWSY8H0mAp+LiEeG4e1Ff8/Ljey9inYBzxAcUP4/7O8TubzGSCZXmdsG3L/mkuSNXt/ub",
+ "Wr5Cb8u/gtuj5LXgh/I2gwHzR/WLl/Q0tQyVhq9Bshsckyy2j79kC59gqtKQC9O3RdyEIoCNtxfWxPUB",
+ "z1t7wL3s0Dp/VfYOZLwMpj32Y1tQDF9fVrKFsD2in5ipjJzcJJWnqG9AFgn8pXhUnOn5wHVx1YnhaKW6",
+ "6EZTGu45liOKyjwylmOYw3rq8ihewV06tYHhOiff1h3cJi7qdm1TA5EmZ4PCak9T4ofSmZtcdwxgupcU",
+ "TkclcPoDQpcIR34MP2+KYn4dS2ZBCRtG8qb09qMWZXGIMDpZcD40NfIxz8tvPl/ax71LAwTkTj08qr5k",
+ "9R1iQAgxibV2Jo+mivLbTEht47slEtmgq1Jea2F3mMY9aLzit2SQ1XeNw74P+GiMqP7us+oKmkIArXt/",
+ "bcLt+p3iJd5HZNuV7hZS5Qn7Zss3VeltIuzPDxb/AU//9Kx49PTxfyz+9OiLRzk8++KrR4/4V8/446+e",
+ "PoYnf/ri2SN4vPzyq8WT4smzJ4tnT559+cVX+dNnjxfPvvzqPx44PuRAJkBnIWno7H9n5+VKZeevLrJL",
+ "B2yLE16J72FH5csdGYfC6DzHkwgbLsrZWfjpf4YTdpKrTTt8+HXmcxLO1tZW5uz09Obm5iTucrpCf97M",
+ "qjpfn4Z5BpXTz19dNO/m9OyCO9p4TJEvjieFc/z28zevL9n5q4uTlmBmZ7NHJ49OHrvxVQWSV2J2NnuK",
+ "P+HpWeO+n3pim529/zCfna6Blxj+4v7YgNUiD5808GLn/29u+GoF+sRXi3c/XT85DWLF6Xvv1/xh37fT",
+ "uPDi6fuO+3dxoCcWZjt9H/KN72/dSejt3d6jDhOh2NfsdIEp8KY2BRM1Hl8KKhvm9D2Ky6O/n/qcXemP",
+ "qLbQeTgNMRLplh0svbdbB2uvR85tvq6r0/f4H6TPCCyKkD+1W3mKDwSn7zur8Z8Hq+n+3naPW1xvVAEB",
+ "YLVcUv2EfZ9P39O/0USwrUALJ/hRVIp/DGmO1UUxO5t9EzV6vob8CksO0ksYnpcnjx4l0odEvRgdX74o",
+ "oXBn79mjZxM6SGXjTj459rDjL/JKqhvJMNiceHm92XC9QxnJ1loa9tP3TCwZ9KcQJsyA/IOvDJprsb7Z",
+ "bD7roOfdB480Cq48xaSvuxaX4eedzJM/Dre5X9s59fPp+25tsQ79mHVtC3UT9UVtikwBw/maarudv09v",
+ "uLBOPvJRSpj7fdjZAi9PfUqi3q9tFoDBF0xtEP0Yv/0nfz1tSmskP/Y5VeqrP6kjjcLzYfjcSi2xFDA7",
+ "exPd/2/efXjnvulrfGN58z661M5OT9Hzf62MPZ19mL/vXXjxx3cNjYVMjbNKi2tM/PDuw/8LAAD//yFC",
+ "1ZtnzgAA",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/model/types.go b/daemon/algod/api/server/v2/generated/model/types.go
index 3589e9e11..ae20c2896 100644
--- a/daemon/algod/api/server/v2/generated/model/types.go
+++ b/daemon/algod/api/server/v2/generated/model/types.go
@@ -407,6 +407,18 @@ type AssetParams struct {
UrlB64 *[]byte `json:"url-b64,omitempty"`
}
+// AvmValue Represents an AVM value.
+type AvmValue struct {
+ // Bytes bytes value.
+ Bytes *[]byte `json:"bytes,omitempty"`
+
+ // Type value type. Value `1` refers to **bytes**, value `2` refers to **uint64**
+ Type uint64 `json:"type"`
+
+ // Uint uint value.
+ Uint *uint64 `json:"uint,omitempty"`
+}
+
// Box Box name and its content.
type Box struct {
// Name \[name\] box name, base64 encoded
@@ -630,6 +642,15 @@ type PendingTransactionResponse struct {
Txn map[string]interface{} `json:"txn"`
}
+// ScratchChange A write operation into a scratch slot.
+type ScratchChange struct {
+ // NewValue Represents an AVM value.
+ NewValue AvmValue `json:"new-value"`
+
+ // Slot The scratch slot written.
+ Slot uint64 `json:"slot"`
+}
+
// SimulateRequest Request type for simulation endpoint.
type SimulateRequest struct {
// AllowEmptySignatures Allow transactions without signatures to be simulated as if they had correct signatures.
@@ -638,6 +659,9 @@ type SimulateRequest struct {
// AllowMoreLogging Lifts limits on log opcode usage during simulation.
AllowMoreLogging *bool `json:"allow-more-logging,omitempty"`
+ // ExecTraceConfig An object that configures simulation execution trace.
+ ExecTraceConfig *SimulateTraceConfig `json:"exec-trace-config,omitempty"`
+
// ExtraOpcodeBudget Applies extra opcode budget during simulation for each transaction group.
ExtraOpcodeBudget *uint64 `json:"extra-opcode-budget,omitempty"`
@@ -651,6 +675,18 @@ type SimulateRequestTransactionGroup struct {
Txns []json.RawMessage `json:"txns"`
}
+// SimulateTraceConfig An object that configures simulation execution trace.
+type SimulateTraceConfig struct {
+ // Enable A boolean option for opting in execution trace features simulation endpoint.
+ Enable *bool `json:"enable,omitempty"`
+
+ // ScratchChange A boolean option enabling returning scratch slot changes together with execution trace during simulation.
+ ScratchChange *bool `json:"scratch-change,omitempty"`
+
+ // StackChange A boolean option enabling returning stack changes together with execution trace during simulation.
+ StackChange *bool `json:"stack-change,omitempty"`
+}
+
// SimulateTransactionGroupResult Simulation result for an atomic transaction group
type SimulateTransactionGroupResult struct {
// AppBudgetAdded Total budget added during execution of app calls in the transaction group.
@@ -674,6 +710,9 @@ type SimulateTransactionResult struct {
// AppBudgetConsumed Budget used during execution of an app call transaction. This value includes budged used by inner app calls spawned by this transaction.
AppBudgetConsumed *uint64 `json:"app-budget-consumed,omitempty"`
+ // ExecTrace The execution trace of calling an app or a logic sig, containing the inner app call trace in a recursive way.
+ ExecTrace *SimulationTransactionExecTrace `json:"exec-trace,omitempty"`
+
// LogicSigBudgetConsumed Budget used during execution of a logic sig transaction.
LogicSigBudgetConsumed *uint64 `json:"logic-sig-budget-consumed,omitempty"`
@@ -696,6 +735,39 @@ type SimulationEvalOverrides struct {
MaxLogSize *uint64 `json:"max-log-size,omitempty"`
}
+// SimulationOpcodeTraceUnit The set of trace information and effect from evaluating a single opcode.
+type SimulationOpcodeTraceUnit struct {
+ // Pc The program counter of the current opcode being evaluated.
+ Pc uint64 `json:"pc"`
+
+ // ScratchChanges The writes into scratch slots.
+ ScratchChanges *[]ScratchChange `json:"scratch-changes,omitempty"`
+
+ // SpawnedInners The indexes of the traces for inner transactions spawned by this opcode, if any.
+ SpawnedInners *[]uint64 `json:"spawned-inners,omitempty"`
+
+ // StackAdditions The values added by this opcode to the stack.
+ StackAdditions *[]AvmValue `json:"stack-additions,omitempty"`
+
+ // StackPopCount The number of deleted stack values by this opcode.
+ StackPopCount *uint64 `json:"stack-pop-count,omitempty"`
+}
+
+// SimulationTransactionExecTrace The execution trace of calling an app or a logic sig, containing the inner app call trace in a recursive way.
+type SimulationTransactionExecTrace struct {
+ // ApprovalProgramTrace Program trace that contains a trace of opcode effects in an approval program.
+ ApprovalProgramTrace *[]SimulationOpcodeTraceUnit `json:"approval-program-trace,omitempty"`
+
+ // ClearStateProgramTrace Program trace that contains a trace of opcode effects in a clear state program.
+ ClearStateProgramTrace *[]SimulationOpcodeTraceUnit `json:"clear-state-program-trace,omitempty"`
+
+ // InnerTrace An array of SimulationTransactionExecTrace representing the execution trace of any inner transactions executed.
+ InnerTrace *[]SimulationTransactionExecTrace `json:"inner-trace,omitempty"`
+
+ // LogicSigTrace Program trace that contains a trace of opcode effects in a logic sig.
+ LogicSigTrace *[]SimulationOpcodeTraceUnit `json:"logic-sig-trace,omitempty"`
+}
+
// StateDelta Application state delta.
type StateDelta = []EvalDeltaKeyValue
@@ -1063,6 +1135,9 @@ type SimulateResponse struct {
// EvalOverrides The set of parameters and limits override during simulation. If this set of parameters is present, then evaluation parameters may differ from standard evaluation in certain ways.
EvalOverrides *SimulationEvalOverrides `json:"eval-overrides,omitempty"`
+ // ExecTraceConfig An object that configures simulation execution trace.
+ ExecTraceConfig *SimulateTraceConfig `json:"exec-trace-config,omitempty"`
+
// LastRound The round immediately preceding this simulation. State changes through this round were used to run this simulation.
LastRound uint64 `json:"last-round"`
diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go
index 41fef3739..08f34c9b0 100644
--- a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go
@@ -130,181 +130,190 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9a3PcNrLoX0HNnio/7lAjP3etqtS5ip1kdeM4LkvJ3nNs3wRD9sxgRQIMAI5m4qv/",
- "fgoNgARJcIYjKfamaj/ZGuLRaDQa/UL3p0kqilJw4FpNTj5NSippARok/kXTVFRcJywzf2WgUslKzQSf",
- "nPhvRGnJ+HIynTDza0n1ajKdcFpA08b0n04k/FYxCdnkRMsKphOVrqCgZmC9LU3reqRNshSJG+LUDnH2",
- "anK94wPNMglK9aH8kedbwniaVxkQLSlXNDWfFLliekX0iiniOhPGieBAxILoVasxWTDIM3XkF/lbBXIb",
- "rNJNPryk6wbERIoc+nC+FMWccfBQQQ1UvSFEC5LBAhutqCZmBgOrb6gFUUBluiILIfeAaoEI4QVeFZOT",
- "9xMFPAOJu5UCW+N/FxLgd0g0lUvQk4/T2OIWGmSiWRFZ2pnDvgRV5VoRbItrXLI1cGJ6HZEfKqXJHAjl",
- "5N23L8mTJ09emIUUVGvIHJENrqqZPVyT7T45mWRUg//cpzWaL4WkPEvq9u++fYnzn7sFjm1FlYL4YTk1",
- "X8jZq6EF+I4REmJcwxL3oUX9pkfkUDQ/z2EhJIzcE9v4TjclnP+L7kpKdboqBeM6si8EvxL7OcrDgu67",
- "eFgNQKt9aTAlzaDvj5MXHz89mj46vv7L+9Pkv92fz55cj1z+y3rcPRiINkwrKYGn22QpgeJpWVHex8c7",
- "Rw9qJao8Iyu6xs2nBbJ615eYvpZ1rmleGTphqRSn+VIoQh0ZZbCgVa6Jn5hUPDdsyozmqJ0wRUop1iyD",
- "bGq479WKpSuSUmWHwHbkiuW5ocFKQTZEa/HV7ThM1yFKDFw3wgcu6F8XGc269mACNsgNkjQXChIt9lxP",
- "/sahPCPhhdLcVeqwy4pcrIDg5OaDvWwRd9zQdJ5vicZ9zQhVhBJ/NU0JW5CtqMgVbk7OLrG/W43BWkEM",
- "0nBzWveoObxD6OshI4K8uRA5UI7I8+eujzK+YMtKgiJXK9Ard+dJUKXgCoiY/xNSbbb9/5z/+IYISX4A",
- "pegS3tL0kgBPRQbZETlbEC50QBqOlhCHpufQOhxcsUv+n0oYmijUsqTpZfxGz1nBIqv6gW5YURWEV8Uc",
- "pNlSf4VoQSToSvIhgOyIe0ixoJv+pBey4inufzNtS5Yz1MZUmdMtIqygm6+Opw4cRWiekxJ4xviS6A0f",
- "lOPM3PvBS6SoeDZCzNFmT4OLVZWQsgWDjNSj7IDETbMPHsYPg6cRvgJw/CCD4NSz7AGHwyZCM+Z0my+k",
- "pEsISOaI/OSYG37V4hJ4TehkvsVPpYQ1E5WqOw3AiFPvlsC50JCUEhYsQmPnDh2Gwdg2jgMXTgZKBdeU",
- "ccgMc0aghQbLrAZhCibcre/0b/E5VfD86dAd33wdufsL0d31nTs+arexUWKPZOTqNF/dgY1LVq3+I/TD",
- "cG7Flon9ubeRbHlhbpsFy/Em+qfZP4+GSiETaCHC302KLTnVlYSTD/yh+Ysk5FxTnlGZmV8K+9MPVa7Z",
- "OVuan3L702uxZOk5Ww4gs4Y1qnBht8L+Y8aLs2O9ieoVr4W4rMpwQWlLcZ1vydmroU22Yx5KmKe1thsq",
- "Hhcbr4wc2kNv6o0cAHIQdyU1DS9hK8FAS9MF/rNZID3Rhfzd/FOWuemty0UMtYaO3ZWM5gNnVjgty5yl",
- "1CDxnftsvhomAFaRoE2LGV6oJ58CEEspSpCa2UFpWSa5SGmeKE01jvQfEhaTk8lfZo39ZWa7q1kw+WvT",
- "6xw7GZHVikEJLcsDxnhrRB+1g1kYBo2fkE1YtodCE+N2Ew0pMcOCc1hTro8alaXFD+oD/N7N1ODbSjsW",
- "3x0VbBDhxDacg7ISsG14T5EA9QTRShCtKJAuczGvf7h/WpYNBvH7aVlafKD0CAwFM9gwpdUDXD5tTlI4",
- "z9mrI/JdODaK4oLnW3M5WFHD3A0Ld2u5W6y2Lbk1NCPeUwS3U8gjszUeDUbMvwuKQ7ViJXIj9eylFdP4",
- "765tSGbm91Gd/xwkFuJ2mLhQ0XKYszoO/hIoN/c7lNMnHGfuOSKn3b43IxszSpxgbkQrO/fTjrsDjzUK",
- "ryQtLYDui71LGUclzTaysN6Sm45kdFGYgzMc0BpCdeOztvc8RCFBUujA8HUu0su/U7W6gzM/92P1jx9O",
- "Q1ZAM5BkRdXqaBKTMsLj1Yw25oiZhqjgk3kw1VG9xLta3p6lZVTTYGkO3rhYYlGP/ZDpgYzoLj/if2hO",
- "zGdztg3rt8MekQtkYMoeZ+dkyIy2bxUEO5NpgFYIQQqr4BOjdR8E5ctm8vg+jdqjb6xNwe2QWwTukNjc",
- "+TH4WmxiMHwtNr0jIDag7oI+zDgoRmoo1Aj4XjnIBO6/Qx+Vkm77SMaxxyDZLNCIrgpPAw9vfDNLY5w9",
- "nQt5M+7TYSucNCZnQs2oAfOddpCETasycaQYMVvZBp2BGi/fbqbRHT6GsRYWzjX9A7CgzKh3gYX2QHeN",
- "BVGULIc7IP1VlOnPqYInj8n530+fPXr8y+Nnzw1JllIsJS3IfKtBkftONyNKb3N40F8ZakdVruOjP3/q",
- "DZXtcWPjKFHJFApa9oeyBlArAtlmxLTrY62NZlx1DeCYw3kBhpNbtBNr2zegvWLKSFjF/E42YwhhWTNL",
- "RhwkGewlpkOX10yzDZcot7K6C1UWpBQyYl/DI6ZFKvJkDVIxEfGmvHUtiGvhxduy+7uFllxRRczcaPqt",
- "OAoUEcrSGz6e79uhLza8wc1Ozm/XG1mdm3fMvrSR7y2JipQgE73hJIN5tWxpQgspCkJJhh3xjv4ONIoC",
- "F6yAc02L8sfF4m5URYEDRVQ2VoAyMxHbwsj1ClLBbSTEHu3MjToGPV3EeBOdHgbAYeR8y1O0M97FsR1W",
- "XAvG0emhtjwNtFgDYw7ZskWWt9dWh9Bhp7qnIuAYdLzGz2joeAW5pt8KedFYAr+ToirvXMjrzjl2OdQt",
- "xplSMtPX69CML/N29M3SwH4UW+MXWdBLf3zdGhB6pMjXbLnSgVrxVgqxuHsYY7PEAMUPVinLTZ++avZG",
- "ZIaZ6ErdgQjWDNZwOEO3IV+jc1FpQgkXGeDmVyounA3Ea6CjGP3bOpT39MrqWXMw1JXSyqy2Kgl6b3v3",
- "RdMxoak9oQmiRg34rmqno21lp7OxALkEmm3JHIATMXcOIue6wkVSdD1rL9440TDCL1pwlVKkoBRkiTNM",
- "7QXNt7NXh96BJwQcAa5nIUqQBZW3BvZyvRfOS9gmGCihyP3vf1YPvgC8Wmia70Estomht1bznRewD/W4",
- "6XcRXHfykOyoBOLvFaIFSrM5aBhC4UE4Gdy/LkS9Xbw9WtYg0R/3h1K8n+R2BFSD+gfT+22hrcqB8D+n",
- "3hoJz2wYp1x4wSo2WE6VTvaxZdOopYObFQScMMaJceABwes1Vdr6kBnP0PRlrxOcxwphZophgAfVEDPy",
- "z14D6Y+dmnuQq0rV6oiqylJIDVlsDRw2O+Z6A5t6LrEIxq51Hi1IpWDfyENYCsZ3yLIrsQiiuna1uCCL",
- "/uLQIWHu+W0UlS0gGkTsAuTctwqwG4ZADQDCVINoSzhMdSinjruaTpQWZWm4hU4qXvcbQtO5bX2qf2ra",
- "9omL6ubezgQojLxy7R3kVxazNvhtRRVxcJCCXhrZA80g1tndh9kcxkQxnkKyi/JRxTOtwiOw95BW5VLS",
- "DJIMcrrtD/qT/Uzs510D4I436q7QkNgopvimN5Tsg0Z2DC1wPBUTHgl+Iak5gkYVaAjE9d4zcgY4dow5",
- "OTq6Vw+Fc0W3yI+Hy7ZbHRkRb8O10GbHHT0gyI6jjwF4AA/10DdHBXZOGt2zO8V/gXIT1HLE4ZNsQQ0t",
- "oRn/oAUM2FBdgHhwXjrsvcOBo2xzkI3t4SNDR3bAoPuWSs1SVqKu8z1s71z1604QdTOSDDRlOWQk+GDV",
- "wDLsT2z8TXfMm6mCo2xvffB7xrfIcnKmUORpA38JW9S539rAzsDUcRe6bGRUcz9RThBQHy5mRPCwCWxo",
- "qvOtEdT0CrbkCiQQVc0LprUN2G6rulqUSThA1K+xY0bnxLNBkX4HxngVz3GoYHn9rZhOrE6wG76LjmLQ",
- "QofTBUoh8hEWsh4yohCMivcgpTC7zlzsuI8e9pTUAtIxbfTg1tf/PdVCM66A/JeoSEo5qlyVhlqmERIF",
- "BRQgzQxGBKvndJEdDYYghwKsJolfHj7sLvzhQ7fnTJEFXPkHF6ZhFx0PH6Id561QunW47sAeao7bWeT6",
- "QIePuficFtLlKfsjC9zIY3bybWfw2ktkzpRSjnDN8m/NADonczNm7SGNjIuqwHFH+XKCoWPrxn0/Z0WV",
- "U30XXitY0zwRa5CSZbCXk7uJmeDfrGn+Y91tj07XRIGxooCMUQ35lpQSUrDR+UZUU/XYR8TG7aUrypco",
- "oUtRLV3gmB0HOWylrC1EVrw3RFSK0RueoFU5xnFdsLB/oGHkF6BGh+qapK3GcEXr+dybnDFXod+5iIk+",
- "6pWaTgZVTIPUdaNiWuS0X5mM4L4tASvATzPxSN8Fos4IG318hdtiqNds7h9jI2+GjkHZnzgIZWs+DkWz",
- "Gf02396BlGEHIhJKCQrvhNAupOxXsQhflLlLQ22VhqJvOrddfxk4fu8GFTTBc8YhKQSHbfQRNePwA36M",
- "Hie8lwY6o4Qw1Lcr9Lfg74DVnmcMNd4Wv7jb3RPadRGpb4W8Kx+kHXC0PD3C5bfXv+2mvKljkuZ5xJfn",
- "3pt0GYCa1u/bmSRUKZEyFJLOMjW1B825/9zjlDb639ZRtHdw9rrjdpxW4VNGNMpCXhJK0pyhyVZwpWWV",
- "6g+colEoWGok2shrv8Nmwpe+SdwuGTEbuqE+cIqRZrWpKBohsYCIXeRbAG8tVNVyCUp3lIsFwAfuWjFO",
- "Ks40zlWY45LY81KCxJCfI9uyoFuyMDShBfkdpCDzSrfFbXxOpTTLc+dBM9MQsfjAqSY5UKXJD4xfbHA4",
- "72X3R5aDvhLyssZC/HZfAgfFVBKPivrOfsWAVbf8lQtexefv9rP1uZjxmzdXW7QZNU+6/9/9/zx5f5r8",
- "N01+P05e/K/Zx09Prx887P34+Pqrr/5/+6cn1189+M//iO2Uhz322MdBfvbKqaJnr1DfaJwuPdg/m8G9",
- "YDyJElkYPtGhLXIfH7Y6AnrQtkbpFXzgesMNIa1pzjLDW25CDt0bpncW7enoUE1rIzrWJ7/WA6X4W3AZ",
- "EmEyHdZ4YymqH0gYf1aHXkD3Ug7Py6Lidiu99G1fjfiALrGY1k8nbVaVE4Lv6lbURyO6Px8/ez6ZNu/h",
- "6u+T6cR9/RihZJZtYq8eM9jElDN3QPBg3FOkpFsFOs49EPZo7JoNpgiHLcBo9WrFys/PKZRm8ziH87H4",
- "zsiz4WfcBsmb84M+xa1zVYjF54dbS4AMSr2KZVtoCWrYqtlNgE6cRynFGviUsCM46hpZMqMvuii6HOgC",
- "X/2j9inGaEP1ObCE5qkiwHq4kFGWjBj9oMjjuPX1dOIuf3Xn6pAbOAZXd87agej/1oLc++6bCzJzDFPd",
- "sw9w7dDBk8mIKu1eBbUigAw3szlmrJD3gX/gr2DBODPfTz7wjGo6m1PFUjWrFMivaU55CkdLQU78Q6NX",
- "VNMPvCdpDaaBCp54kbKa5ywll6FC0pCnTe3RH+HDh/c0X4oPHz72giH66oObKspf7ASJEYRFpROXmCCR",
- "cEVlzNmk6ofpOLLNPLJrVitki8paJH3iAzd+nOfRslTdB6r95ZdlbpYfkKFyzy/NlhGlhfSyiBFQLDS4",
- "v2+EuxgkvfJ2lUqBIr8WtHzPuP5Ikg/V8fETIK0Xm7+6K9/Q5LaE0daVwQe0XaMKLtyqlbDRkiYlXcZ8",
- "Wh8+vNdAS9x9lJcLtHHkOcFurZeiPhIeh2oW4PExvAEWjoNfveHizm0vn4QqvgT8hFuIbYy40Xjab7pf",
- "wdvRG29X5/1pb5cqvUrM2Y6uShkS9ztT56ZZGiHLhz8otkRt1aXxmQNJV5BeuvwqUJR6O2119xE2TtD0",
- "rIMpm3nHvvzC3A/oEZgDqcqMOlGc8m33Eb4CrX0c7zu4hO2FaFJHHPLqvv0IXA0dVKTUQLo0xBoeWzdG",
- "d/NdGBcq9mXp31LjozpPFic1Xfg+wwfZirx3cIhjRNF6pDyECCojiLDEP4CCGyzUjHcr0o8tz2gZc3vz",
- "RbLweN5PXJNGeXIRV+Fq0OpuvxeAabzElSJzauR24TJQ2YfOARerFF3CgIQcOmVGPiduOXJwkH33XvSm",
- "E4vuhda7b6Ig28aJWXOUUsB8MaSCykwnzs7PZP1+zjOBiSUdwuY5ikl1QKJlOlS2nGM2U94QaHECBskb",
- "gcOD0cZIKNmsqPLJsTCHmD/Lo2SAP/Dh/q50LWdBiFiQKKxOxuJ5bvec9rRLl7TFZ2rx6VlC1XJEqhUj",
- "4WNUemw7BEcBKIMclnbhtrEnlCaJQLNBBo4fF4uccSBJLNosMIMG14ybA4x8/JAQa4Eno0eIkXEANvqz",
- "cWDyRoRnky8PAZK7JAjUj42e8OBviL/XsvHXRuQRpWHhbMCrlXoOQF2IYn1/dQJlcRjC+JQYNremuWFz",
- "TuNrBullDUGxtZMjxEVUPBgSZ3c4QOzFctCa7FV0k9WEMpMHOi7Q7YB4LjaJfbAZlXjnm7mh92hIOj4f",
- "jR1Mm5/lniJzscEoHbxabAj0HliG4fBgBBr+himkV+w3dJtbYHZNu1uailGhQpJx5ryaXIbEiTFTD0gw",
- "Q+RyP0i5ciMAOsaOJn+xU373Kqlt8aR/mTe32rRJJeZf+8SO/9ARiu7SAP76Vpg6ScrbrsQStVO0g03a",
- "+WECETJG9IZN9J00fVeQghxQKUhaQlRyGfOcGt0G8MY5990C4wVmoaF8+yCIYJKwZEpDY0T3cRJfwjxJ",
- "MfmdEIvh1elSLsz63glRX1PWjYgdW8v87CvAEOAFk0on6IGILsE0+lahUv2taRqXldoxUjZVLMvivAGn",
- "vYRtkrG8itOrm/f7V2baNzVLVNUc+S3jNmBljqmNo5GTO6a2wbU7F/zaLvg1vbP1jjsNpqmZWBpyac/x",
- "JzkXHc67ix1ECDBGHP1dG0TpDgYZvHjtc8dAbgp8/Ee7rK+9w5T5sfdG7fh3t0N3lB0pupbAYLBzFQzd",
- "REYsYTrIDNx/ijpwBmhZsmzTsYXaUQc1ZnqQwcPnU+tgAXfXDbYHA4HdM/YaRoJqp85rBHyb47mVueZo",
- "FGYu2gnuQoYQTsWUr1DQR1T9Wm4fri6A5t/D9mfTFpczuZ5Obmc6jeHajbgH12/r7Y3iGV3z1pTW8oQc",
- "iHJallKsaZ44A/MQaUqxdqSJzb09+jOzurgZ8+Kb09dvHfjX00maA5VJLSoMrgrblX+aVdksfQMHxGdA",
- "Nzqfl9mtKBlsfp1aLDRKX63ApZIOpNFezsvG4RAcRWekXsQjhPaanJ1vxC5xh48EytpF0pjvrIek7RWh",
- "a8pybzfz0A5E8+DixiVOjXKFcIBbe1cCJ1lyp+ymd7rjp6Ohrj08KZxrR7LrwuZzV0TwrgsdY563pfO6",
- "FxQzVlqrSJ858apAS0KicpbGbax8rgxxcOs7M40JNh4QRs2IFRtwxfKKBWOZZmNy0nSADOaIIlNF0+I0",
- "uJsLV6un4uy3CgjLgGvzSeKp7BxUTG/irO3969TIDv253MDWQt8MfxsZI8zW2r3xEIjdAkboqeuB+6pW",
- "mf1Ca4uU+SFwSRzg8A9n7F2JO5z1jj4cNdvgxVXb4xaW1unzP0MYNsf6/ro+Xnl1aWMH5ojW6WEqWUjx",
- "O8T1PFSPIw+NfH5ahlEuv0P40CGsTtFiMbV1pyk31Mw+uN1D0k1ohWoHKQxQPe584JbDRJneQk253Wpb",
- "NqMV6xYnmDCqdGbHbwjGwdyLxM3p1ZzGsogaIcPAdNo4gFu2dC2I7+xxr+rXFnZ2EviS67bMPiIvQTZv",
- "APsJaW4oMNhpR4sKjWSAVBvKBFPr/8uViAxT8SvKbfUV088eJddbgTV+mV5XQmIKCBU3+2eQsoLmcckh",
- "S/sm3owtmS0sUikIKle4gWzRJktFrvpH/YbIoeZsQY6nQfkctxsZWzPF5jlgi0e2xZwq5OS1IaruYpYH",
- "XK8UNn88ovmq4pmETK+URawSpBbqUL2pnVdz0FcAnBxju0cvyH102ym2hgcGi+5+npw8eoFGV/vHcewC",
- "cIVhdnGTDNnJPxw7idMx+i3tGIZxu1GPoq/lbWW4Yca14zTZrmPOErZ0vG7/WSoop0uIR4oUe2CyfXE3",
- "0ZDWwQvPbFkjpaXYEqbj84Omhj8NRJ8b9mfBIKkoCqYL59xRojD01JSlsJP64WyNJJdR2MPlP6KPtPQu",
- "oo4S+XmNpvZ+i60aPdlvaAFttE4JtXk/ctZEL/g85+TMpxXCFMt1ZmWLGzOXWTqKORjMsCClZFyjYlHp",
- "RfI3kq6opKlhf0dD4Cbz508jaaXb6U35YYB/drxLUCDXcdTLAbL3MoTrS+5zwZPCcJTsQfPaIziVg87c",
- "uNtuyHe4e+ixQpkZJRkkt6pFbjTg1LciPL5jwFuSYr2eg+jx4JV9dsqsZJw8aGV26Kd3r52UUQgZyxXY",
- "HHcncUjQksEaY/fim2TGvOVeyHzULtwG+i/refAiZyCW+bMcUwS+FhHt1Kc6ry3pLlY9Yh0YOqbmgyGD",
- "uRtqStpppT8/H72bKKi4p8sbtvuOLfPF4wH/6CLiC5MLbmDjy7crGSCUIK1+lGSy+nvgY6fka7EZSzid",
- "U+iJ518ARVGUVCzPfm5efnaqFkjK01XUZzY3HX9p6qvVi7N3YDTt34pyDnl0OCtv/uLl0ojk/E8xdp6C",
- "8ZFtu4UU7HI7i2sAb4PpgfITGvQynZsJQqy2H9XVQdv5UmQE52lyzDXHtV+AI0iT/lsFSsceKOEHGziG",
- "tlHDDmyWbgI8Q430iHxnSyivgLQSCKEm6DNFtF9NV2UuaDbFDBYX35y+JnZW28dWCbJZwpeoCLVX0bGJ",
- "Bekzx4Ug+4I/8ecR48fZHa9tVq10Uif1jj1ANS2atOOs4ydAFSnEzhF5FRRDtW9VzRCGHhZMFkarq0ez",
- "8hHShPmP1jRdodrXYq3DJD8+vb2nShWUlKxLQ9U5JfHcGbhdhnub4H5KhNHNr5iylXNhDe03r/UDcGd2",
- "8G9g28uTFeeWUo4OuOXqDJKHot0DZ69I70qIQtZB/IFCv60OcWi2/3PsFU1x1S0d0KslaV9Q1iV/fEX0",
- "lHLBWYoJpmJXtCuxO8bPNiIXV9eQ64+4O6GRwxUtWFCH4jksDpYw8IzQIa5v6A++mk211GH/1FjLdUU1",
- "WYJWjrNBNvV1N5ytkXEFLkcoFmQO+KSQLd8lcsioOzyp3SYHkhE+vRlQHr8139440wLGpF8yjkqEQ5sT",
- "/Kw1ECuAaqN5ME2WApRbT/v9sXpv+hzhU9wMNh+PfMVQHMO6/syyrZ+7P9Sp93o7L7Np+9K0dQmS6p9b",
- "Uc520tOydJMOV2WJygN6wwcRHPFeJt59FCC3Hj8cbQe57QxXwfvUEBqs0dkNJd7DPcKoK5R0ql8ZodVS",
- "FLYgNkwsmiWB8QgYrxmHpp5t5IJIo1cCbgye14F+KpVUWxFwFE+7AJqjhzvG0JR27o3bDtVND2VQgmv0",
- "cwxvY1NcZYBx1A0awY3ybV1G11B3IEy8xPrdDpH9UikoVTkhKsNXC53iKTHGYRi3L8/UvgD6x6AvE9nu",
- "WlJ7cg65iYYeos6rbAk6oVkWS9n6NX4l+JVkFUoOsIG0qlN7liVJMe9KOxFNn9rcRKngqip2zOUb3HK6",
- "oBpRhBrCikh+h/Ghy3yL/8byWg7vjAv0ODjU0Ed1ZIdlX+qHTsakXkPTiWLLZDwm8E65PTqaqW9G6E3/",
- "O6X0XCzbgHzm9BO7uFy4RzH+9o25OMLsDL1krfZqqZMnYGCf8DUkUW2sn/22uRJeZb3srehQqmvU7TZA",
- "DFebm+LlNxDeGyTdoPZ+tR7KoSDfdDAmnWr3Ok5TspMFDb44shFC9m0RQhG3zg5FBdmgIPO513ucZNiT",
- "s3U88WGAUB9u1gfoex/LSkrKnPu9YRZ9zLqo9/47hDHxsM0GdxfhYskHLXbfr4fivn0yNvzerUZ1Ce7J",
- "fClhzUTlHds+8smrhPbXVm2nOvI+uv6+4RWn+rLm0EHj7YWrCmCX6XTy73+2cXIEuJbbfwFTbm/Te3Wu",
- "+tKuNU81TUidUHpUgunWrTgmUWEsJ56TDVuVtvbUCeuR1asx4kC/7td0cpYddGHG8ipO7CixYxev4jWc",
- "dqpJNYVHrBSKNXndY+W9RoYYXmCFriBtVn8sH9+zhlRjMv8mbkECHJJEy0wWFAz9d/qpAXW6jsR0Wad2",
- "pZrqZ/Dfc8f3XoMFLxpt9vOj8YmVTuvoNOTTmA15CdzV7Gy/8xgdbb5YQKrZes/ru3+sgAcvu6beLmNr",
- "bweP8VgdvYzJWw63OjYA7XoctxOeIInircEZentzCdt7irSoIZqOfeqv2pvk7UAMIHdIDIkIFYv+sIZk",
- "55BnqqYMxIKPtrLdocmANljJKXhLesO5PEmai6N5X7pjyngpmVFzma4HvbrGQNyhB3r9ShTD+scrLPyh",
- "6iqLPu9HqKWTs352xCuXNwTfSta+E59BBJT/zT+MtrPk7BLCWlPoqbqiMvMtoqYXb9VJdtxHvVd1vopC",
- "F+hFPTNrYmP776gi+bYwAjrNhREjkqEw8nY4ah3LcU/ZoBub/h0DbQ1cC5CuJh/Kv7lQkGjhY2l3wbEL",
- "FTay6EZIUIM5Li1wg5ln3jWpdTDXL8VMM9QFFIULJBIKaqCTQQKc4Tl3Iful/e4fDvlcr3stTDW97i86",
- "4KOimeohMaT6BXG35f4HSTcxNjHObd1nFcuGw0G2vSGlFFmV2gs6PBi1QW50rqkdrCRqp0n7q+zoCMGr",
- "zkvYzqwS5Ks1+B0MgbaSkwU9yKLQ2eQ7Nb+pGNzLOwHvS1quppNSiDwZcHac9VP4dCn+kqWXkBFzU/jo",
- "wYHKN+Q+2thrb/bVautT1pQlcMgeHBFyym28tndst3NIdybn9/Su+Tc4a1bZrFrOqHb0gccDXzHflbwl",
- "N/PD7OZhCgyru+VUdpA9CWI2A+mDJL2K1IE6GquV913N3do8DVFZKGIySVN2Zk+cTB0i01T+aMJk+tJB",
- "nourBKkoqfN/xXQO067NJH3G06abwfYcgngbqtwFuiUrmpFUSAlp2CP+xMECVQgJSS4w/CbmGVxoIw8V",
- "GNfMSS6WRJRGzbVp9LwPJVqWJpjLPrO1PRPrqBlIZADKPat109jG/Xl2VK85vDLOxSpib0FEeywfXP7G",
- "EcrBVSsCMEcQ6H5b02msuk97Xd36UEPV2rQoWBpH958rymQwNmRP7aLI+mpydKWV/KvAAVxFXba7PaS2",
- "Dt18rJ+0zpk88lgEAAx7TlswjPKfHgrGAus6JjSC5LNaap22yu6yztn3+ewsjafUaq0rIGbsSoJ7pWYL",
- "0HUq55RUr/wtZpr3dUujp4DCJ2S2/AdV1hLiLTKu+l1XPBBlksMaWg5l93SuSlNQiq0hrJxnO5MMoET7",
- "ZFdqjnlKQy7XEaXc2pPA1zYGu1HZyiLW7hTZIzhFxbwNT+wxUWOPkoFozbKKtvCnblGLbKgMWYQNe1hH",
- "coqDmUR8cbtYxN7YBqT56Lnk8dCG8OVmbRTB2bLaeGqJsDnZqqRXfFiJiNidan/77ddBcDCiOi+pB698",
- "We/KTRXIQcrYRRi9+oFRmUOBr/8aJj3x4pbrG5GxrKmLqcgATDXnGaP3oIkOC5oVdEsytliAtMZ8pSnP",
- "qMzC5oyTFKSmzGg2W3VzsdZAKyuY7pVsDXfFQT2Dicm4aJeygORbpzLcQupEz01E4rRXrRZDJRJ7uxJ/",
- "TkA3RrrGuKoBInAPoVG2tgdMcBSQSEEv4cB5FPsddk+D6Umc7U8LnHXMFDFf6w1zq41i3f0whMjtFhRD",
- "3O0ZClMvNm+6pI1mQUuyvyC7NP5Dc3GOK8voO+wBL3QYBoUZve3GgfOFH0f9UCMlWMrHIUpoLX+fD9It",
- "sJE0gi1yjEBrsIlwbUB9e18CB7N6Wftth2qIdt27mGdRcFvkr+cWtrzJVu0LCMecBbmm+ed37WICzlPE",
- "B2Tvho3BoW8wRLJFpbrZy4TXdNTcgR/w7qbmb9EV/Q8wexTVSt1QToSpxXofzIM3C82t4WLhS3itgZMr",
- "HNPGsT16Tubu5XYpIWWqKxpd+eoatSsMi0251yAbvcf3tm+dPwt9CzJeeE2DvGky9aOOv+QNhM0R/cJM",
- "ZeDkRqk8Rn09sojgL8ajwhRqe66Ly1aAm6180nm5ISTccaBbELJ+YKBbPznc2OXZYC5z6VQK+uscfVu3",
- "cBu5qJu1jY3S7CN3Vzr3McGV8SoNpjtGd1qEYIkTgqCSXx/9SiQssIahIA8f4gQPH05d018ftz+b4/zw",
- "YVQ6+2xxnRZHbgw3b4xifh566Wdfsw08Ku3sR8XybB9htJ4IN1VA8RHsLy4RwRepQ/qLjTXpH1VXC+4W",
- "AXIWMZG1tiYPpgoe/4549+u6RV75oh8nrSTTW8yP6O0H7JdoBOp3dTSTi4ar9UN392lxCXWGzSb2qVL+",
- "dv1O0BzvI6u2cnMLifyIfLOhRZmDOyhf3Zv/FZ787Wl2/OTRX+d/O352nMLTZy+Oj+mLp/TRiyeP4PHf",
- "nj09hkeL5y/mj7PHTx/Pnz5++vzZi/TJ00fzp89f/PWe4UMGZAvoxGfjmfxfLNabnL49Sy4MsA1OaMm+",
- "h62tC2jI2FccpCmeRCgoyycn/qf/7U/YUSqKZnj/68Ql+5istC7VyWx2dXV1FHaZLTHYIdGiSlczP0+v",
- "JOHp27PaS2StQLij9p2st+55UjjFb+++Ob8gp2/PjoJ69SeT46Pjo0dY3rwETks2OZk8wZ/w9Kxw32eO",
- "2CYnn66nk9kKaI6xgeaPArRkqf8kgWZb9391RZdLkEeuDKP5af145sWK2ScX9HG969ssrGgy+9SKjcn2",
- "9MSKB7NPPpHf7tatTHkuJijoMBKKXc1mc8wPMrYpqKDx8FJQ2VCzTyguD/4+cwkN4h9RbbHnYeYDyOIt",
- "W1j6pDcG1k6PlOp0VZWzT/gfpM9ryzByiIWL2TwAlDTNp4RpQudCYgY9na4Mj/Cpu5gKWk6Qai3Bn2WG",
- "0E2vlxYCn6TTZi0/ed93Z+FAxI+EXMGQfHNoWzM1fBktUEEi7frWabVv7p73x8mLj58eTR8dX//F3C3u",
- "z2dPrkf6pV7W45Lz+uIY2fAj5r1CAySe5cfHx7eoCH/KA/TbTaof8kSKutudGHYZuK3qDERqZOzJz9MZ",
- "PlZi9no6eXrginfaklqPmyKlYr+mGfEuf5z70eeb+4xj1K3h8cTeYdfTybPPufozbkie5gRbBgkX+1v/",
- "E7/k4or7lkbgqIqCyq0/xqrFFIjbbLzW6FJhHIZka4pyHhe8VUVu8hFjf2JhFwP8Rml6A35zbnr9m998",
- "Ln6Dm3QX/KY90B3zm8cHnvk//4r/zWH/bBz23LK7W3FYJ/DZF+EzveEzdGfNPrUEVPe5J6C2f2+6hy3W",
- "hcjAy6BisbC55nd9nn2y/wYTwaYEyQrgNgen+9W+lpthBsht/+ctT6M/9tfRLfQa+3n2qV1oqIUgtap0",
- "Jq5s2rPolYXZ9GnuUu+iubZW/bQgfoDmaRL50b2mzrdoo2YZEIppnkSlG93cdK7DRGrviRmhKQK9ZBwn",
- "QDM4zmJzTNMg6F9BKrgtmdq5Hh1kb0QG/esRL8DfKpDb5gZ0ME6mLf7oCDyS0fnW102fnV0fRv5orre+",
- "pj5x1HVSW3/PrijT5hJ1b4QQo/3OGmg+cwmBOr82b/B7XzCxQPBjGOsS/XVWF0WIfuyqwrGvThUcaORd",
- "7/5zYxYLzUxIErWB6f1Hs7OYctdRS2M1OZnNMO5+JZSeTa6nnzoWlfDjx3ozfZ7EelOvP17/TwAAAP//",
- "Qz/sGdnLAAA=",
+ "H4sIAAAAAAAC/+x9/XPcNrLgv4Ka96oc+4aS/JHsWlVb7xQ7yeriJC5Lyd57li+LIXtmsOIAXAKUZuLT",
+ "/36FboAESXCGI03sTd37ydYQH41Go9Hd6I+Pk1StCiVBGj05/TgpeMlXYKDEv3iaqkqaRGT2rwx0WorC",
+ "CCUnp/4b06YUcjGZToT9teBmOZlOJF9B08b2n05K+GclSsgmp6asYDrR6RJW3A5sNoVtXY+0ThYqcUOc",
+ "0RDnryd3Wz7wLCtB6z6UP8l8w4RM8yoDZkouNU/tJ81uhVkysxSauc5MSKYkMDVnZtlqzOYC8kwf+UX+",
+ "s4JyE6zSTT68pLsGxKRUOfThfKVWMyHBQwU1UPWGMKNYBnNstOSG2RksrL6hUUwDL9Mlm6tyB6gERAgv",
+ "yGo1OX0/0SAzKHG3UhA3+N95CfAbJIaXCzCTD9PY4uYGysSIVWRp5w77JegqN5phW1zjQtyAZLbXEfuh",
+ "0obNgHHJ3n37ij1//vylXciKGwOZI7LBVTWzh2ui7pPTScYN+M99WuP5QpVcZknd/t23r3D+C7fAsa24",
+ "1hA/LGf2Czt/PbQA3zFCQkIaWOA+tKjf9ogciubnGcxVCSP3hBofdFPC+T/rrqTcpMtCCWki+8LwK6PP",
+ "UR4WdN/Gw2oAWu0Li6nSDvr+JHn54ePT6dOTu397f5b8l/vzy+d3I5f/qh53BwaiDdOqLEGmm2RRAsfT",
+ "suSyj493jh70UlV5xpb8Bjefr5DVu77M9iXWecPzytKJSEt1li+UZtyRUQZzXuWG+YlZJXPLpuxojtqZ",
+ "0Kwo1Y3IIJta7nu7FOmSpVzTENiO3Yo8tzRYaciGaC2+ui2H6S5EiYXrXvjABf3rIqNZ1w5MwBq5QZLm",
+ "SkNi1I7ryd84XGYsvFCau0rvd1mxyyUwnNx+oMsWcSctTef5hhnc14xxzTjzV9OUiTnbqIrd4ubk4hr7",
+ "u9VYrK2YRRpuTusetYd3CH09ZESQN1MqBy4Ref7c9VEm52JRlaDZ7RLM0t15JehCSQ1Mzf4BqbHb/r8u",
+ "fvqRqZL9AFrzBbzl6TUDmaoMsiN2PmdSmYA0HC0hDm3PoXU4uGKX/D+0sjSx0ouCp9fxGz0XKxFZ1Q98",
+ "LVbVislqNYPSbqm/QoxiJZiqlEMA0Yg7SHHF1/1JL8tKprj/zbQtWc5Sm9BFzjeIsBVf/+Vk6sDRjOc5",
+ "K0BmQi6YWctBOc7OvRu8pFSVzEaIOcbuaXCx6gJSMReQsXqULZC4aXbBI+R+8DTCVwCOH2QQnHqWHeBI",
+ "WEdoxp5u+4UVfAEByRyxnx1zw69GXYOsCZ3NNvipKOFGqErXnQZgxKm3S+BSGUiKEuYiQmMXDh2WwVAb",
+ "x4FXTgZKlTRcSMgsc0aglQFiVoMwBRNu13f6t/iMa/jqxdAd33wduftz1d31rTs+arexUUJHMnJ12q/u",
+ "wMYlq1b/EfphOLcWi4R+7m2kWFza22YucryJ/mH3z6Oh0sgEWojwd5MWC8lNVcLplXxi/2IJuzBcZrzM",
+ "7C8r+umHKjfiQizsTzn99EYtRHohFgPIrGGNKlzYbUX/2PHi7Niso3rFG6WuqyJcUNpSXGcbdv56aJNp",
+ "zH0J86zWdkPF43LtlZF9e5h1vZEDQA7iruC24TVsSrDQ8nSO/6znSE98Xv5m/ymK3PY2xTyGWkvH7kpG",
+ "84EzK5wVRS5SbpH4zn22Xy0TAFIkeNPiGC/U048BiEWpCiiNoEF5USS5SnmeaMMNjvTvJcwnp5N/O27s",
+ "L8fUXR8Hk7+xvS6wkxVZSQxKeFHsMcZbK/roLczCMmj8hGyC2B4KTULSJlpSEpYF53DDpTlqVJYWP6gP",
+ "8Hs3U4NvknYI3x0VbBDhjBrOQJMETA0faRagniFaGaIVBdJFrmb1D1+cFUWDQfx+VhSED5QeQaBgBmuh",
+ "jX6My+fNSQrnOX99xL4Lx0ZRXMl8Yy8HEjXs3TB3t5a7xWrbkltDM+IjzXA7VXlkt8ajwYr5h6A4VCuW",
+ "KrdSz05asY3/6tqGZGZ/H9X5j0FiIW6HiQsVLYc50nHwl0C5+aJDOX3CceaeI3bW7Xs/srGjxAnmXrSy",
+ "dT9p3C14rFF4W/KCAHRf6C4VEpU0akSwPpCbjmR0UZiDMxzQGkJ177O28zxEIUFS6MDwda7S679yvTzA",
+ "mZ/5sfrHD6dhS+AZlGzJ9fJoEpMywuPVjDbmiNmGqOCzWTDVUb3EQy1vx9IybniwNAdvXCwh1GM/ZHpQ",
+ "RnSXn/A/PGf2sz3blvXTsEfsEhmYpuPsHhkyq+2TgkAz2QZohVBsRQo+s1r3XlC+aiaP79OoPfqGbApu",
+ "h9wicIfU+uDH4Gu1jsHwtVr3joBagz4EfdhxUIw0sNIj4HvtIFO4/w59vCz5po9kHHsMku0Creiq8TTI",
+ "8Ma3szTG2bOZKu/HfTpsRbLG5My4HTVgvtMOkrBpVSSOFCNmK2rQGah55dvONLrDxzDWwsKF4b8DFrQd",
+ "9RBYaA90aCyoVSFyOADpL6NMf8Y1PH/GLv569uXTZ78++/IrS5JFqRYlX7HZxoBmXzjdjGmzyeFxf2Wo",
+ "HVW5iY/+1QtvqGyPGxtHq6pMYcWL/lBkACURiJox266PtTaacdU1gGMO5yVYTk5oZ2Tbt6C9FtpKWKvZ",
+ "QTZjCGFZM0vGHCQZ7CSmfZfXTLMJl1huyuoQqiyUpSoj9jU8YkalKk9uoNRCRV5T3roWzLXw4m3R/Z2g",
+ "ZbdcMzs3mn4riQJFhLLMWo7n+zT05Vo2uNnK+Wm9kdW5ecfsSxv53pKoWQFlYtaSZTCrFi1NaF6qFeMs",
+ "w454R38HBkWBS7GCC8NXxU/z+WFURYUDRVQ2sQJtZ2LUwsr1GlIlyRNih3bmRh2Dni5ivInODAPgMHKx",
+ "kSnaGQ9xbIcV15WQ+OihNzINtFgLYw7ZokWWD9dWh9BBUz3SEXAsOt7gZzR0vIbc8G9VedlYAr8rVVUc",
+ "XMjrzjl2OdwtxplSMtvX69BCLvK2983Cwn4UW+NnWdArf3zdGhB6pMg3YrE0gVrxtlRqfngYY7PEAMUP",
+ "pJTltk9fNftRZZaZmEofQARrBms4nKXbkK/xmaoM40yqDHDzKx0Xzgb8NfChGN+3TSjvmSXpWTOw1JXy",
+ "yq62Khi+3vbui6ZjwlM6oQmiRg+8XdWPjtSKpiNfgLwEnm3YDEAyNXMPRO7pChfJ8enZePHGiYYRftGC",
+ "qyhVClpDljjD1E7QfDu6OswWPCHgCHA9C9OKzXn5YGCvb3bCeQ2bBB0lNPvi+1/0488Ar1GG5zsQi21i",
+ "6K3VfPcK2Id63PTbCK47eUh2vATm7xVmFEqzORgYQuFeOBncvy5EvV18OFpuoMT3uN+V4v0kDyOgGtTf",
+ "md4fCm1VDLj/OfXWSnh2wySXygtWscFyrk2yiy3bRi0d3K4g4IQxTowDDwheb7g29IYsZIamL7pOcB4S",
+ "wuwUwwAPqiF25F+8BtIfO7X3oNSVrtURXRWFKg1ksTVIWG+Z60dY13OpeTB2rfMYxSoNu0YewlIwvkMW",
+ "rYQQxE391OKcLPqLwwcJe89voqhsAdEgYhsgF75VgN3QBWoAEKEbRBPhCN2hnNrvajrRRhWF5RYmqWTd",
+ "bwhNF9T6zPzctO0TFzfNvZ0p0Oh55do7yG8Js+T8tuSaOTjYil9b2QPNIPTY3YfZHsZEC5lCso3yUcWz",
+ "rcIjsPOQVsWi5BkkGeR80x/0Z/rM6PO2AXDHG3VXGUjIiym+6Q0le6eRLUMrHE/HhEeGX1hqj6BVBRoC",
+ "cb13jJwBjh1jTo6OHtVD4VzRLfLj4bJpqyMj4m14o4zdcUcPCLLj6GMAHsBDPfT9UYGdk0b37E7xn6Dd",
+ "BLUcsf8kG9BDS2jG32sBAzZU5yAenJcOe+9w4CjbHGRjO/jI0JEdMOi+5aURqShQ1/keNgdX/boTRJ8Z",
+ "WQaGixwyFnwgNbAI+zPyv+mOeT9VcJTtrQ9+z/gWWU4uNIo8beCvYYM691ty7AxMHYfQZSOj2vuJS4aA",
+ "encxK4KHTWDNU5NvrKBmlrBht1AC09VsJYwhh+22qmtUkYQDRN81tszoHvHIKdLvwJhXxQscKlhefyum",
+ "E9IJtsN32VEMWuhwukChVD7CQtZDRhSCUf4erFB214XzHffew56SWkA6po0vuPX1/0i30IwrYP+pKpZy",
+ "iSpXZaCWaVSJggIKkHYGK4LVczrPjgZDkMMKSJPEL0+edBf+5Inbc6HZHG59wIVt2EXHkydox3mrtGkd",
+ "rgPYQ+1xO49cH/jgYy8+p4V0ecpuzwI38pidfNsZvH4lsmdKa0e4dvkPZgCdk7kes/aQRsZ5VeC4o95y",
+ "gqFj68Z9vxCrKufmEK9WcMPzRN1AWYoMdnJyN7FQ8psbnv9Ud8NgEkgtjaaQpBgCMXIsuLR9KGpil27Y",
+ "eJOJ1QoywQ3kG1aUkAJ5+VuRT9cwHjHy/0uXXC5Q0i9VtXAOaDQOcupKk02lrGRviKg0ZNYyQet0jHM7",
+ "p2Mf6GHlIOBWF+uatknzuOX1fC62Z8yVGiCva+qPvm5NJ4OqqkXqTaOqEnLa0SojuHhLUAvw00w88g0E",
+ "UWeFlj6+wm2xp8Bu7u9ja2+GjkHZnzhwiWs+DnnFWT053xxAWqGBWAlFCRrvltC+pOmrmoeRae7y0Rtt",
+ "YNU3wVPXXweO37tBRU/JXEhIVkrCJhqMLST8gB+jxwnvt4HOKGkM9e0qDy34O2C15xlDjQ/FL+5294R2",
+ "n5r0t6o81FsmDThaLh/xdLjzndxNed8HTp7nkTdBF7fSZQB6WsfJi5JxrVUqUNg6z/SUDpp7RnRBLm30",
+ "v629cQ9w9rrjdh6/wpBINO5CXjDO0lyg6VdJbcoqNVeSo3EpWGrEa8lr0cPmxle+Sdy+GTE/uqGuJEeP",
+ "tdrkFPW0mEPEvvItgLc66mqxAG06Ssoc4Eq6VkKySgqDc63scUnovBRQouvQEbVc8Q2bW5owiv0GpWKz",
+ "yrTFdgzL0kbkuXuJs9MwNb+S3LAcuDbsByEv1zicf633R1aCuVXldY2F+O2+AAla6CTuXfUdfUXHV7f8",
+ "pXOCxTB6+kxvN3b8JnZrg7anJjT8/3zxH6fvz5L/4slvJ8nL/3H84eOLu8dPej8+u/vLX/5v+6fnd395",
+ "/B//HtspD3ssaMhBfv7aqbTnr1FvaR5verB/MsP9SsgkSmShG0aHttgXGCDrCOhx26pllnAlzVpaQrrh",
+ "ucgsb7kPOXRvmN5ZpNPRoZrWRnSsWH6te2oDD+AyLMJkOqzx3lJU3yExHp6Hr4ku4g7Py7yStJVe+qbo",
+ "E+8YpubTOgSTsrOcMozPW3Lv1ej+fPblV5NpE1dXf59MJ+7rhwgli2wdi57MYB1T8twBwYPxSLOCbzSY",
+ "OPdA2KM+cOSUEQ67gtUMSr0UxafnFNqIWZzDeZ9+Zyxay3NJzvb2/ODb5MY9eaj5p4fblAAZFGYZy9rQ",
+ "EtSwVbObAB1/kaJUNyCnTBzBUddYk1l90Xnj5cDnmD0AtU81RhuqzwERmqeKAOvhQkZZRGL0gyKP49Z3",
+ "04m7/PXB1SE3cAyu7pz1Q6T/2yj26LtvLtmxY5j6EQXy0tBB6GVElXbRRS1PIsvNKFcNCXlX8kq+hrmQ",
+ "wn4/vZIZN/x4xrVI9XGlofya51ymcLRQ7NQHLL3mhl/JnqQ1mE4qCBVjRTXLRcquQ4WkIU9KEdIf4erq",
+ "Pc8X6urqQ8+poq8+uKmi/IUmSKwgrCqTuAQHSQm3vIw9Wuk6wB1Hpgwm22YlIVtVZNn0CRTc+HGex4tC",
+ "dwNd+8svitwuPyBD7cI47ZYxbVTpZREroBA0uL8/KncxlPzW21UqDZr9fcWL90KaDyy5qk5OngNrRX7+",
+ "3V35liY3BYy2rgwG4naNKrhwUithbUqeFHwRexu7unpvgBe4+ygvr9DGkecMu7UiTr1HPQ7VLMDjY3gD",
+ "CI69o+dwcRfUyyezii8BP+EWYhsrbjQv9vfdryAG9d7b1Ylj7e1SZZaJPdvRVWlL4n5n6hw3CytkeTcK",
+ "LRaorbp0QDNg6RLSa5enBVaF2Uxb3b2njhM0PesQmjL4UAQZ5pDAl4UZsKrIuBPFudx0g/k1GOP9gd/B",
+ "NWwuVZOCYp/o/XYwuR46qEipgXRpiTU8tm6M7uY7dzBU7IvCx2RjcJ4ni9OaLnyf4YNMIu8BDnGMKFrB",
+ "zkOI4GUEEUT8Ayi4x0LteA8i/djyrJYxo5svks3H837mmjTKk/PcCleDVnf6vgJMB6ZuNZtxK7crl8mK",
+ "AqYDLlZpvoABCTl83BkZltx6EMJBdt170ZtOzbsXWu++iYJMjRO75iilgP1iSQWVmY6/np+J3g/dywQm",
+ "qHQIm+UoJtWOjcR0eNl6ZKOMe0OgxQkYStkIHB6MNkZCyWbJtU+yhbnI/FkeJQP8jgkAtqV9OQ9czYKE",
+ "Y3VSF89zu+e0p1265C8+44tP8xKqliNStlgJH73bY9uhJApAGeSwoIVTY08oTTKCZoMsHD/N57mQwJKY",
+ "11pgBg2uGTcHWPn4CWNkgWejR4iRcQA2vovjwOxHFZ5NudgHSOmSKXA/Nr6oB39DPO6L/LityKMKy8LF",
+ "wKtW6jkAd66O9f3VcbjFYZiQU2bZ3A3PLZtzGl8zSC/7CIqtnVwjzjPj8ZA4u+UBhC6WvdZEV9F9VhPK",
+ "TB7ouEC3BeKZWicU+BmVeGfrmaX3qGs7hqHGDibleXmk2Uyt0dsHrxZypd4ByzAcHoxAw18LjfSK/YZu",
+ "cwJm27TbpakYFWokGWfOq8llSJwYM/WABDNELl8EqVvuBUDH2NHkQXbK704ltS2e9C/z5labNinJfNRQ",
+ "7PgPHaHoLg3gr2+FqZOtvO1KLFE7RdtppZ1nJhAhY0Rv2UT/kab/FKQhB1QKkpYQlVzHXk6tbgN441z4",
+ "boHxArPZcLl5HHhClbAQ2kBjRPd+Ep/DPMkxiZ5S8+HVmaKc2/W9U6q+pugZETu2lvnJV4CuxHNRapPg",
+ "C0R0CbbRtxqV6m9t07is1Pa1opSzIovzBpz2GjZJJvIqTq9u3u9f22l/rFmirmbIb4Ukh5UZpkiOemBu",
+ "mZqcdLcu+A0t+A0/2HrHnQbb1E5cWnJpz/EHORcdzruNHUQIMEYc/V0bROkWBhlEzva5YyA3BW/8R9us",
+ "r73DlPmxd3rt+PjdoTuKRoquJTAYbF2FwGciK5YIE2QY7oe0DpwBXhQiW3dsoTTqoMbM9zJ4+LxsHSzg",
+ "7rrBdmAgsHvGompK0O0UfI2AT7miWxlwjkZh5rKdKC9kCOFUQvtKB31E1VF3u3B1CTz/Hja/2La4nMnd",
+ "dPIw02kM127EHbh+W29vFM/4NE+mtNZLyJ4o50VRqhueJ87APESapbpxpInNvT36E7O6uBnz8puzN28d",
+ "+HfTSZoDL5NaVBhcFbYr/jCromx/AwfEZ1K3Op+X2UmUDDa/TlEWGqVvl+BSUgfSaC93ZvPgEBxFZ6Se",
+ "xz2Edpqc3dsILXHLGwkU9RNJY76jF5L2qwi/4SL3djMP7YA3Dy5uXALWKFcIB3jw60rwSJYclN30Tnf8",
+ "dDTUtYMnhXNtSZq9orzwminZfUJHn+dN4V7dVxwzX5JVpM+cZLVCS0Kic5HGbaxypi1xSHo7s40ZNh4Q",
+ "Ru2IlRh4ipWVCMayzcbktukAGcwRRaaOptdpcDdTruZPJcU/K2AiA2nspxJPZeegYpoUZ23vX6dWdujP",
+ "5QYmC30z/ENkjDDra/fGQyC2CxjhS10P3Ne1yuwXWluk7A/Bk8QeD/7hjL0rcctjvaMPR83kvLhsv7iF",
+ "JXr6/M8SBuVq310fyCuvLv3swBzRej9CJ/NS/QZxPQ/V40jAks9zK9DL5TcIAx3CKhctFlNbd5qyRc3s",
+ "g9s9JN2EVqi2k8IA1ePOB89ymHDTW6i5pK2mQJKWr1ucYEKv0mMavyEYB3PPEzfntzMey0ZqhQwL01nz",
+ "ANyypRvFfGePe11HW9DsLHhLrtsKCkYvoGxiCfuJbe4pMNC0o0WFRjJAqg1lgim9/+VaRYap5C2XVMXF",
+ "9qOj5HprIOOX7XWrSkwloeNm/wxSseJ5XHLI0r6JNxMLQQVKKg1BBQw3EBV/IipyVUTqGCKHmvM5O5kG",
+ "ZXjcbmTiRmgxywFbPKUWM66Rk9eGqLqLXR5Is9TY/NmI5stKZiVkZqkJsVqxWqhD9aZ+vJqBuQWQ7ATb",
+ "PX3JvsBnOy1u4LHForufJ6dPX6LRlf44iV0ArsDMNm6SITv5m2MncTrGd0sawzJuN+pRNOqeKswNM64t",
+ "p4m6jjlL2NLxut1nacUlX0DcU2S1Aybqi7uJhrQOXmRG5ZG0KdWGCROfHwy3/GnA+9yyPwKDpWq1Embl",
+ "Hne0Wll6aspb0KR+OKq15DITe7j8R3wjLfwTUUeJ/LRGU7rfYqvGl+wf+QraaJ0yTvlDctF4L/h86ezc",
+ "pyfCVM11hmbCjZ3LLh3FHHRmmLOiFNKgYlGZefJnli55yVPL/o6GwE1mX72IpKdup0mV+wH+yfFegoby",
+ "Jo76coDsvQzh+rIvpJLJynKU7HET7RGcysHH3Piz3dDb4fahxwpldpRkkNyqFrnxgFM/iPDklgEfSIr1",
+ "evaix71X9skpsyrj5MEru0M/v3vjpIyVKmM5B5vj7iSOEkwp4AZ99+KbZMd84F6U+ahdeAj0n/flwYuc",
+ "gVjmz3JUEbhZ/eLNsoM++1aE/+UHV06xJ3sP+BmQI0Hd5xPHIkRdkkhCQzc+hqtmf3/6d1bC3BVIfPIE",
+ "gX7yZOqEub8/a38mJvXkSTwTT9SmYX9tsLAXK+xmKrB9Y3v4tYpYGHza+/o1xMUbRCw8Q6zWfrBHeeaG",
+ "mrJ2ivFPfxcexpMt/loZPwVXV+/xi8cD/tFFxGc+8riBjT8GrWSAUIISC1GSyervgZ8EZ1+r9VjC6XBS",
+ "Tzz/AiiKoqQSefZLE73bYW0ll+ky+u45sx1/bWrt1YujwxtNAbnkUkIeHY50hl+9bhHRfv6hxs6zEnJk",
+ "225RDVpuZ3EN4G0wPVB+QoteYXI7QYjVdmBk7XifL1TGcJ4m32BzXPvFWIKU+f+sQJvYhYUfyPkP7duW",
+ "HVDGdgYyQ6vCEfuOymkvgbWSSaE277N9tCPfqyJXPJtiFpLLb87eMJqV+lDFKMoYv0Bltr2Kjl0zSKU6",
+ "zo3cF3+Kh7iMH2e7z71dtTZJneA9FkRsWzQp6EXnrQfV3BA7R+x1UBiX4o3tEAyT0JQrq5nXo5GMizRh",
+ "/2MMT5eourdY6zDJjy914KlSB+VF6zJhdX5RPHcWblftgIodTJkySyhvhaYqynAD7bjlOojfmY58HHN7",
+ "eWUlJVHK0R63XJ1NdF+0e+DoivTPQVHIOojfU3GjSiH7Vn64wF7RdGfdMhK9uqIUBVuXf/LV8VMulRQp",
+ "JhuLXdGu3PKYt9IRedm6xnh/xN0JjRyuaPGK2p3SYXGwnIVnhA5x/cea4KvdVKIO+tNgXd8lN2wBRjvO",
+ "BtnU12Bx9mIhNbh8sVicO+CTqmy9PyOHjLo0JPXT155khOFTAwaAb+23H515COMKroVERdChzQl+ZNHF",
+ "arDGao/CsIUC7dbTjiHX722fIwynzmD94chXj8Ux6PnWLpt8FfpDnXnPBecpYNu+sm1dkqv655anOk16",
+ "VhRu0uEKPVF5wKzlIIIjL9CJfwIMkFuPH462hdy2uhzhfWoJDW7QYQEKvId7hFFXq+lUQrNCK1EUtmDk",
+ "6hfNdCFkBIw3QkJT2zhyQaTRKwE3Bs/rQD+dltyQCDiKp10Cz0mhjjA0bdwT1UOH6qb4sijBNfo5hrex",
+ "KbQzwDjqBo3gxuWmLqlsqTsQJl5hLXeHyH7ZHJSqnBCVYeRJp5BOjHFYxu1LdbUvgAE9vyUTUXfMd7fv",
+ "TTQUTDyrsgWYhGdZLH3v1/iV4VeWVSg5wBrSqk7zWhQsxdw57WRCfWpzE6VK6mq1ZS7f4IHTBZWpItQQ",
+ "VsfyO4zBSrMN/hvLcTq8M85ZZ293Ue+Zk+2XQavv/hqTei1NJ1oskvGYwDvl4ehopr4foTf9D0rpuVq0",
+ "AfkcZrsBLhfuUYy/fWMvjjDDRi9xL10tdQIMdM5Uvp4oqo116HabK+FV1svki4+Cdb3C7QaI4cqDU7z8",
+ "Bly0QyMs3a9kmBxy1E4H4wq4cRGOhrOtLGgwaoy8vDpm3b6Ffcizixy7DmcOdWvdilDvMtgH6Hvvj8wK",
+ "LpwLRcMs+ph1kQv9WJIxPs3NBncX4eIBBi12398M+e77hHr4vVuZ7Bpc2oOihBuhKu+c4L3XvEpIv7bq",
+ "fNXRE9H19w2vONXnNYcOGm8vXYUIWqbTyb//hXwdGUhTbv4FTLm9Te/VPOtLu2SeapqwOrn4qGTjrVtx",
+ "TLLJWF5DJxu2qq7tqBnXI6vXY8SBfg246eQ82+vCjOXGnNAosWMXr+g2nDqsSReGR6xQWjQ5/mOl3ka6",
+ "iV5itbYg9Vl/LO+jdQOpwcIOje9JCbBPIjQ7WVA89r9TiA2o07U3rcscti1dWL+aw447vhfRF0SlUib8",
+ "o/HJsc5qD0Pk05jRegHS1W9tx+qMjhiYzyE14mZHBOXfliCD6Lypt8tQHfYgoFLUHuiYgGd/q2MD0LYA",
+ "x63wBIkwHwzOUPzUNWweadaihmhq/qm/au+TewUxgNwhsSSidMyDhwzJzqlC6JoyEAveY466Q5PFbrCq",
+ "VxAPfM+5PEnai6OJEd4yZbys0Ki5bNe9IufRmXooyLJflWRY/3iNRWB0XXHT524JtXR23s9weetyv2C8",
+ "a/124rPAgPa/+eB2miUX1xDWHcOXqlteZr5F1PTirTrJlvuoFxnpK2p0gZ7XM4vGv7kfCxfJmYZe7Gmu",
+ "rBiRDIUCtF2Ka3+cR5ocpyiFPzpLW7jmULr6jCj/5kpDYpT3h94GxzZUkHfYvZCgB/OUEnCD2YPeNemR",
+ "MF8zx2xB3DmFhQtkJay4ha4MkhgNz7kN2a/ouw/+8vl6d1qYanrdXTjCe7YL3UNiSPVz5m7L3UFl9zE2",
+ "CSmpBriOZTSSULZfQ4pSZVVKF3R4MGqD3Oh8YVtYSdROk/ZX2dERgsjca9gckxLkK274HQyBJsmJQA8y",
+ "YXQ2+aDmNx2De3EQ8D6n5Wo6KZTKk4HHjvN+GqYuxV+L9BoyZm8K7wE6UAWJfYE29vo1+3a58WmHigIk",
+ "ZI+PGDuT5HPvH7bbecA7k8tHZtv8a5w1qygzmjOqHV3JuPMy5iwrH8jN/DDbeZgGy+oeOBUNsiPJz3og",
+ "BVTJbyM1wY7GauX9p+ZunaaGqAiKmExyQS9Wr/CgxwxHt6Uw4Bwb6BK3G8ncSxfTuYo5CcLtuPj92qHU",
+ "7kiuBi7ucDIEyIAcE+dZQ+EGjyKgrsG0w1Go9hFqytc0fkJ98SjP1W2Cxyipk9jFlC7brn1L+LS9TTdL",
+ "bjMIHI64dhLEhi15xlJVlpCGPeJxOgTUSpWQ5Ar9j2JPo3NjBcIVOudLlqsFU4XV8ykXpH9EitZWCuY6",
+ "VB0pijknCBJ68RrI6gHaxZg7cKlxH94tpZz2LxN1uYwYrnDD/G7tXQvKEdzeJVwCMEcQ+m6j3Vms1FV7",
+ "Xd2ia0MlEI1aiTSO7j+Wu86gk02MemOocFmUKYoTm+EBD3lK/TqLp6ePZpB8lkd5tTt+7pUK6dz+F6/w",
+ "7rhsDo65DPCzSM1mYsNJOnhZdABASCm0yFQlpV4OWXld0E0tKBQR39i6gI5kOOjK8DDY7AiHBOpuO6HE",
+ "Kr5FDkK9O64gnY+lHjhUUSeJ7T4JVAV0NtYzoc40P5J/BgAM+yq0YBjlsbAvGHOsqpvwCJLPaz1x2ip6",
+ "LjqXhM8CSsww5WQnWgKzY1cluNheKv/ZqTdWcLP0cqNt3rfmyAzWoDHwloomcU22R28DdbVHuwK5KpIc",
+ "bqDlwuECjqs0Ba3FDYR1S6kzywAKfBHo6qkx34TwOuwoL27tSfC6PQa7UW2GEEs7xXaoKlHFai0TOiZ6",
+ "7FGyEN2IrOIt/OkHVHAcKt4Yua89rB/GcYq9mUR8cdtYxE5vIqT56LmUcWeiMN69NkPibFn9XEFE2Jxs",
+ "XfBbOay294myETfH1z4NEPvNGlK8utveMg/HCcPBmO7kshiUM8t6h+9r/hmksm1E1qsEG9fDwFfyDtNO",
+ "eV3B9Y1cjWSoFjoygNANb0DfW2h8O4NmK75hmZjPoaSnOG24zHiZhc2FZCmUhgvJbvlG318ns9CWFUx3",
+ "qmWWU+OgnlnFFDS0KhMg+cYp/EMq0whVB99dI2oOXdtGDRWp7e1KPBiIr61qiF6RA0TgUlGgYkiHVUmU",
+ "ytmKX8Oe82jxG2yfBhNEOcu9UTjrmCnuttL6T4g6PPA/S2G2UjvJe103VXpHJGL0NCgXjTMDbU6fBmOe",
+ "xZdUKi30Lu5WHvF7TUZNmg8GMqm2xfSBXUSzjnNLD2VyPV5dbVmOYv7LxMMT5O16i7sC6KBWW+rMzX2x",
+ "pHcpEFKmzvt7T6mF1AWeZWKoNP4SXLpyd7ba09YmQDvOeEt3YO+KQ1SoIknHvGFlkINlNaS1OEjbMI6w",
+ "kRXpjmshekkOcKW2iqTmyB/wWJBogN4+9YU47fqhtYWA+uBh3eW0KlGMveWb3SkxG0Eg7sJPI3sd3Hsm",
+ "1VC7DaYjrqmUTzTj5D4CYoTrxKrZ9HP9HX4xFJvSvJ7/fstx72PxBZxJpyhhjcJt9NaoUp5UIrTG5SbG",
+ "NPwL0D0WOCQfjvCuPthW1afl99ig6CV5vxTQo0Dre9pGsBnUbN/u/BRmiG/SFpTksI3OEl4j7fKLHxpN",
+ "dVz1eN9hB3ihT1xQP94/TzpwPnP8/w81UoKlfBiihNbyd7nZuQU2qn2wRU5aNgaoXgfFjLb3JfCh1K9q",
+ "18SBq7nnwYjp4K14lucRz0cS4Km4eEA49l4sb3j+6b0XsU7AGeIDsnfD/g6h+1uIZEKlvl/w7Rs+au7A",
+ "1e1wU8u36G35N7B7FL0W3FDOZtBj/qh+8Zyepua+0vANSHaLY5LF9ulXbOYSTBUlpEJ3bRG3vghg7e2F",
+ "NXFdwPPa7HAv27XOX5R5ABnPvWmP/dgUFMPXl4VsIGyO6GdmKgMnN0rlMerrkUUEfzEeFWZ63nFdXLdi",
+ "OBqpLrjRVAkHjuUIojL3jOXo57AeuzyKV7CXTqWhv87Rt3ULt5GLulnb2ECk0dmgsNrTmPiheOYm2x0D",
+ "mA6SwmmvBE6/Q+gS4ciN4eaNUcwvQ8ksKGHDQN6Uzn5UIs92EUYrC85dXSMf87z86vKlfdq71ENA7tT9",
+ "o+pKVj8gBoQQE1lra/JgqiC/zYjUNq5bJJENuiqlVSnMBtO4e41X/BoNsvqudth3AR+1EdXdfUZdQ10I",
+ "oHHvr7S/Xb9TPMf7iGy70t5CKj9i36z5qsidTYT95dHsT/D8zy+yk+dP/zT788mXJym8+PLlyQl/+YI/",
+ "ffn8KTz785cvTuDp/KuXs2fZsxfPZi+evfjqy5fp8xdPZy++evmnR5YPWZAJ0IlPGjr538lZvlDJ2dvz",
+ "5NIC2+CEF+J72FD5ckvGvjA6T/EkwoqLfHLqf/qf/oQdpWrVDO9/nbichJOlMYU+PT6+vb09CrscL9Cf",
+ "NzGqSpfHfp5e5fSzt+f1uzk9u+CO1h5T5IvjSOEMv7375uKSnb09P2oIZnI6OTk6OXpqx1cFSF6Iyenk",
+ "Of6Ep2eJ+37siG1y+vFuOjleAs8x/MX+sQJTitR/KoFnG/d/fcsXCyiPXLV4+9PNs2MvVhx/dH7Nd9u+",
+ "HYeFF48/tty/sx09sTDb8Uefb3x761ZCb+f2HnQYCcW2ZsczTIE3tinooPHwUlDZ0McfUVwe/P3Y5eyK",
+ "f0S1hc7DsY+RiLdsYemjWVtYOz1SbtJlVRx/xP8gfd4Rw8ghFhFBqa44a5pPmTCMz1SJib5NurQ8wmcY",
+ "FjpoOUGqJYI/zyyh216vCAJfS4CKK52+7/tm4EDMj4RcwZJ8c2hbMzV8GZ9pgno/9a3Tat/cPe9Pkpcf",
+ "Pj6dPj25+zd7t7g/v3x+N9Jj6FU9LruoL46RDT9gel58pcOz/OzkxDMwpx4ExHfszmqwuJ6a1CySNqmO",
+ "Ve/f644Wht/o3VZ1BmI1MnakEe0M3xdPkGe/2HPFW21Jrfh9HL6bWTBj3qkT53766eY+lxhYZnk8ozvs",
+ "bjr58lOu/lxakuc5w5ZBXvj+1v8sr6W6lb6lFTiq1YqXG3+MdYspMLfZeK3xhcZXhFLccJTzpJKtYteT",
+ "D+jeHnOsHeA32vB78JsL2+u/+c2n4je4SYfgN+2BDsxvnu155v/4K/7/m8O+OPnzp4PAxwVcihWoyvxR",
+ "OfwFsdsHcXgncFLSpWOzlsfoc3L8sSUgu889Abn9e9M9bHGzUhl4GVjN51SSa9vn44/0bzARrAsoxQok",
+ "lSpwv1JCimNMlL/p/7yRafTH/jqKTnXp2M/HH9v1WFsI0svKZOqWMgtHr0wsOsZzV6EEzcW16mkU8wM0",
+ "0f/sJ5ewKN+gjVxkwDhmUlWVaWwDtnPtF1q/3tgRmF46M/lCSJwAzfA4C5Xi4YFPgoZUyQw13s717CD7",
+ "UWXQv57xAv5nBeWmuYEdjJNpiz87Ao8Uvnnwdddnp3f7kT8+F9BbV584XNnzzt/Ht1wYe4m7MHzEaL+z",
+ "AZ4fu5ybnV+bNFe9L5i7K/gxdG6N/npc146Lfuyq4rGvThUdaOT94/znxiwXmrmQJGoD1/sPdmexMomj",
+ "lsZqc3p8jKGtS6XN8eRu+rFj0Qk/fqg306cirzf17sPd/wsAAP//2NTAeUjZAAA=",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go
index 8eb9f99c1..774d72558 100644
--- a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go
+++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go
@@ -702,253 +702,263 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9a5PbtpLoX0Fpt8qPFTV+Zk+mKrV3YucxG8dx2ZOcPRv7JhDZknCGAngAUCPF1//9",
- "FhoACZKgRM1oxnYyn+wRSaDRaDT63e9HqVgWggPXanT8flRQSZegQeJfNE1FyXXCMvNXBiqVrNBM8NGx",
- "f0aUlozPR+MRM78WVC9G4xGnS6jfMd+PRxL+VTIJ2ehYyxLGI5UuYEnNwHpTmLerkdbJXCRuiBM7xOnz",
- "0YctD2iWSVCqC+VPPN8QxtO8zIBoSbmiqXmkyAXTC6IXTBH3MWGcCA5EzIheNF4mMwZ5piZ+kf8qQW6C",
- "VbrJ+5f0oQYxkSKHLpzPxHLKOHiooAKq2hCiBclghi8tqCZmBgOrf1ELooDKdEFmQu4A1QIRwgu8XI6O",
- "fx0p4BlI3K0U2Ar/O5MAf0CiqZyDHr0bxxY30yATzZaRpZ067EtQZa4VwXdxjXO2Ak7MVxPyY6k0mQKh",
- "nLz+9hl5/Pjxl2YhS6o1ZI7IeldVzx6uyX4+Oh5lVIN/3KU1ms+FpDxLqvdff/sM53/jFjj0LaoUxA/L",
- "iXlCTp/3LcB/GCEhxjXMcR8a1G++iByK+ucpzISEgXtiXz7opoTzf9RdSalOF4VgXEf2heBTYh9HeVjw",
- "+TYeVgHQeL8wmJJm0F8fJF++e/9w/PDBh3/79ST5X/fn08cfBi7/WTXuDgxEX0xLKYGnm2QugeJpWVDe",
- "xcdrRw9qIco8Iwu6ws2nS2T17ltivrWsc0Xz0tAJS6U4yedCEerIKIMZLXNN/MSk5LlhU2Y0R+2EKVJI",
- "sWIZZGPDfS8WLF2QlCo7BL5HLlieGxosFWR9tBZf3ZbD9CFEiYHrUvjABX26yKjXtQMTsEZukKS5UJBo",
- "seN68jcO5RkJL5T6rlL7XVbkbAEEJzcP7GWLuOOGpvN8QzTua0aoIpT4q2lM2IxsREkucHNydo7fu9UY",
- "rC2JQRpuTuMeNYe3D30dZESQNxUiB8oRef7cdVHGZ2xeSlDkYgF64e48CaoQXAER039Cqs22//ebn14S",
- "IcmPoBSdwyuanhPgqcggm5DTGeFCB6ThaAlxaL7sW4eDK3bJ/1MJQxNLNS9oeh6/0XO2ZJFV/UjXbFku",
- "CS+XU5BmS/0VogWRoEvJ+wCyI+4gxSVddyc9kyVPcf/raRuynKE2poqcbhBhS7r+6sHYgaMIzXNSAM8Y",
- "nxO95r1ynJl7N3iJFCXPBog52uxpcLGqAlI2Y5CRapQtkLhpdsHD+H7w1MJXAI4fpBecapYd4HBYR2jG",
- "nG7zhBR0DgHJTMjPjrnhUy3OgVeETqYbfFRIWDFRquqjHhhx6u0SOBcakkLCjEVo7I1Dh2Ew9h3HgZdO",
- "BkoF15RxyAxzRqCFBsusemEKJtyu73Rv8SlV8MWTvju+fjpw92eivetbd3zQbuNLiT2SkavTPHUHNi5Z",
- "Nb4foB+Gcys2T+zPnY1k8zNz28xYjjfRP83+eTSUCplAAxH+blJszqkuJRy/5ffNXyQhbzTlGZWZ+WVp",
- "f/qxzDV7w+bmp9z+9ELMWfqGzXuQWcEaVbjws6X9x4wXZ8d6HdUrXghxXhbhgtKG4jrdkNPnfZtsx9yX",
- "ME8qbTdUPM7WXhnZ9wu9rjayB8he3BXUvHgOGwkGWprO8J/1DOmJzuQf5p+iyM3XupjFUGvo2F3JaD5w",
- "ZoWToshZSg0SX7vH5qlhAmAVCVq/cYQX6vH7AMRCigKkZnZQWhRJLlKaJ0pTjSP9u4TZ6Hj0b0e1/eXI",
- "fq6OgslfmK/e4EdGZLViUEKLYo8xXhnRR21hFoZB4yNkE5btodDEuN1EQ0rMsOAcVpTrSa2yNPhBdYB/",
- "dTPV+LbSjsV3SwXrRTixL05BWQnYvnhHkQD1BNFKEK0okM5zMa1+uHtSFDUG8flJUVh8oPQIDAUzWDOl",
- "1T1cPq1PUjjP6fMJ+S4cG0VxwfONuRysqGHuhpm7tdwtVtmW3BrqEe8ogtsp5MRsjUeDEfMPQXGoVixE",
- "bqSenbRiXv7evRuSmfl90MefB4mFuO0nLlS0HOasjoO/BMrN3RbldAnHmXsm5KT97eXIxowSJ5hL0crW",
- "/bTjbsFjhcILSQsLoHti71LGUUmzL1lYr8hNBzK6KMzBGQ5oDaG69FnbeR6ikCAptGD4Ohfp+fdULQ5w",
- "5qd+rO7xw2nIAmgGkiyoWkxGMSkjPF71aEOOmHkRFXwyDaaaVEs81PJ2LC2jmgZLc/DGxRKLevwOmR7I",
- "iO7yE/6H5sQ8NmfbsH477IScIQNT9jg7J0NmtH2rINiZzAtohRBkaRV8YrTuvaB8Vk8e36dBe/SNtSm4",
- "HXKLwB0S64Mfg6/FOgbD12LdOQJiDeoQ9GHGQTFSw1INgO+5g0zg/jv0USnppotkHHsIks0Cjeiq8DTw",
- "8MY3s9TG2ZOpkJfjPi22wkltcibUjBow33ELSfhqWSSOFCNmK/tCa6Day7edabSHj2GsgYU3ml4DFpQZ",
- "9RBYaA50aCyIZcFyOADpL6JMf0oVPH5E3nx/8vTho98ePf3CkGQhxVzSJZluNChy1+lmROlNDve6K0Pt",
- "qMx1fPQvnnhDZXPc2DhKlDKFJS26Q1kDqBWB7GvEvNfFWhPNuOoKwCGH8wwMJ7doJ9a2b0B7zpSRsJbT",
- "g2xGH8KyepaMOEgy2ElM+y6vnmYTLlFuZHkIVRakFDJiX8MjpkUq8mQFUjER8aa8cm8Q94YXb4v27xZa",
- "ckEVMXOj6bfkKFBEKEuv+XC+b4c+W/MaN1s5v11vZHVu3iH70kS+tyQqUoBM9JqTDKblvKEJzaRYEkoy",
- "/BDv6O9AoyhwxpbwRtNl8dNsdhhVUeBAEZWNLUGZmYh9w8j1ClLBbSTEDu3MjToEPW3EeBOd7gfAYeTN",
- "hqdoZzzEse1XXJeMo9NDbXgaaLEGxhyyeYMsr66t9qHDTnVHRcAx6HiBj9HQ8RxyTb8V8qy2BH4nRVkc",
- "XMhrzzl0OdQtxplSMvOt16EZn+fN6Ju5gX0SW+NHWdAzf3zdGhB6pMgXbL7QgVrxSgoxOzyMsVligOID",
- "q5Tl5puuavZSZIaZ6FIdQASrB6s5nKHbkK/RqSg1oYSLDHDzSxUXznriNdBRjP5tHcp7emH1rCkY6kpp",
- "aVZbFgS9t537ov4woak9oQmiRvX4riqno33LTmdjAXIJNNuQKQAnYuocRM51hYuk6HrWXrxxomGEXzTg",
- "KqRIQSnIEmeY2gmaf89eHXoLnhBwBLiahShBZlReGdjz1U44z2GTYKCEInd/+EXd+wjwaqFpvgOx+E4M",
- "vZWa77yAXaiHTb+N4NqTh2RHJRB/rxAtUJrNQUMfCvfCSe/+tSHq7OLV0bICif64a6V4P8nVCKgC9Zrp",
- "/arQlkVP+J9Tb42EZzaMUy68YBUbLKdKJ7vYsnmpoYObFQScMMaJceAewesFVdr6kBnP0PRlrxOcxwph",
- "Zop+gHvVEDPyL14D6Y6dmnuQq1JV6ogqi0JIDVlsDRzWW+Z6CetqLjELxq50Hi1IqWDXyH1YCsZ3yLIr",
- "sQiiunK1uCCL7uLQIWHu+U0UlQ0gakRsA+SNfyvAbhgC1QMIUzWiLeEw1aKcKu5qPFJaFIXhFjopefVd",
- "H5re2LdP9M/1u13iorq+tzMBCiOv3PsO8guLWRv8tqCKODjIkp4b2QPNINbZ3YXZHMZEMZ5Cso3yUcUz",
- "b4VHYOchLYu5pBkkGeR00x30Z/uY2MfbBsAdr9VdoSGxUUzxTa8p2QeNbBla4HgqJjwSfEJScwSNKlAT",
- "iPt6x8gZ4Ngx5uTo6E41FM4V3SI/Hi7bbnVkRLwNV0KbHXf0gCA7jj4E4B48VENfHhX4cVLrnu0p/gHK",
- "TVDJEftPsgHVt4R6/L0W0GNDdQHiwXlpsfcWB46yzV42toOP9B3ZHoPuKyo1S1mBus4PsDm46teeIOpm",
- "JBloynLISPDAqoFF+D2x8TftMS+nCg6yvXXB7xjfIsvJmUKRpwn8OWxQ535lAzsDU8chdNnIqOZ+opwg",
- "oD5czIjg4SuwpqnON0ZQ0wvYkAuQQFQ5XTKtbcB2U9XVokjCAaJ+jS0zOieeDYr0OzDEq/gGhwqW192K",
- "8cjqBNvhO2spBg10OF2gECIfYCHrICMKwaB4D1IIs+vMxY776GFPSQ0gHdNGD251/d9RDTTjCsg/RElS",
- "ylHlKjVUMo2QKCigAGlmMCJYNaeL7KgxBDkswWqS+OT+/fbC7993e84UmcGFT7gwL7bRcf8+2nFeCaUb",
- "h+sA9lBz3E4j1wc6fMzF57SQNk/ZHVngRh6yk69ag1deInOmlHKEa5Z/ZQbQOpnrIWsPaWRYVAWOO8iX",
- "EwwdWzfu+xu2LHOqD+G1ghXNE7ECKVkGOzm5m5gJ/s2K5j9Vn+3Q6eooMLZcQsaohnxDCgkp2Oh8I6qp",
- "auwJsXF76YLyOUroUpRzFzhmx0EOWyprC5El7wwRlWL0midoVY5xXBcs7BM0jPwC1OhQbZO01RguaDWf",
- "y8kZchX6nYuY6KNeqfGoV8U0SF3VKqZFTjPLZAD3bQhYAX7qiQf6LhB1Rtjo4ivcFkO9ZnOvx0ZeDx2D",
- "sjtxEMpWP+yLZjP6bb45gJRhByISCgkK74TQLqTsUzELM8rcpaE2SsOyazq3n/7Wc/xe9ypogueMQ7IU",
- "HDbRJGrG4Ud8GD1OeC/1fIwSQt+3baG/AX8LrOY8Q6jxqvjF3W6f0LaLSH0r5KF8kHbAwfL0AJffTv+2",
- "m/Kyjkma5xFfnss3aTMANa7y25kkVCmRMhSSTjM1tgfNuf9cckoT/a+qKNoDnL32uC2nVZjKiEZZyAtC",
- "SZozNNkKrrQsU/2WUzQKBUuNRBt57bffTPjMvxK3S0bMhm6ot5xipFllKopGSMwgYhf5FsBbC1U5n4PS",
- "LeViBvCWu7cYJyVnGudamuOS2PNSgMSQn4l9c0k3ZGZoQgvyB0hBpqVuituYTqU0y3PnQTPTEDF7y6km",
- "OVClyY+Mn61xOO9l90eWg74Q8rzCQvx2nwMHxVQSj4r6zj7FgFW3/IULXsX0d/vY+lzM+HXO1QZtRnVK",
- "9/+9+1/Hv54k/0uTPx4kX/7H0bv3Tz7cu9/58dGHr776f82fHn/46t5//XtspzzssWQfB/npc6eKnj5H",
- "faN2unRgvzGD+5LxJEpkYfhEi7bIXUxsdQR0r2mN0gt4y/WaG0Ja0ZxlhrdchhzaN0znLNrT0aKaxka0",
- "rE9+rXtK8VfgMiTCZFqs8dJSVDeQMJ5Wh15AlymH52VWcruVXvq2WSM+oEvMxlXqpK2qckwwr25BfTSi",
- "+/PR0y9G4zofrno+Go/c03cRSmbZOpb1mME6ppy5A4IH444iBd0o0HHugbBHY9dsMEU47BKMVq8WrLh5",
- "TqE0m8Y5nI/Fd0aeNT/lNkjenB/0KW6cq0LMbh5uLQEyKPQiVm2hIajhW/VuArTiPAopVsDHhE1g0jay",
- "ZEZfdFF0OdAZZv2j9imGaEPVObCE5qkiwHq4kEGWjBj9oMjjuPWH8chd/urg6pAbOAZXe87Kgej/1oLc",
- "+e6bM3LkGKa6YxNw7dBBymRElXZZQY0IIMPNbI0ZK+S95W/5c5gxzszz47c8o5oeTaliqToqFcivaU55",
- "CpO5IMc+0eg51fQt70havWWgghQvUpTTnKXkPFRIavK0pT26I7x9+yvN5+Lt23edYIiu+uCmivIXO0Fi",
- "BGFR6sQVJkgkXFAZczapKjEdR7aVR7bNaoVsUVqLpC984MaP8zxaFKqdoNpdflHkZvkBGSqXfmm2jCgt",
- "pJdFjIBiocH9fSncxSDphberlAoU+X1Ji18Z1+9I8rZ88OAxkEbG5u/uyjc0uSlgsHWlN4G2bVTBhVu1",
- "EtZa0qSg85hP6+3bXzXQAncf5eUl2jjynOBnjUxRHwmPQ9UL8Pjo3wALx95Zb7i4N/YrX4QqvgR8hFuI",
- "7xhxo/a0X3a/gtzRS29XK/+0s0ulXiTmbEdXpQyJ+52patPMjZDlwx8Um6O26sr4TIGkC0jPXX0VWBZ6",
- "M2587iNsnKDpWQdTtvKOzfzC2g/oEZgCKYuMOlGc8k07CV+B1j6O9zWcw+ZM1KUj9sm6byaBq76DipQa",
- "SJeGWMNj68Zob74L40LFvih8LjUm1XmyOK7own/Tf5CtyHuAQxwjikaSch8iqIwgwhJ/DwousVAz3pVI",
- "P7Y8o2VM7c0XqcLjeT9xr9TKk4u4CleDVnf7fAlYxktcKDKlRm4XrgKVTXQOuFip6Bx6JOTQKTMwnbjh",
- "yMFBdt170ZtOzNoXWue+iYJsX07MmqOUAuaJIRVUZlpxdn4m6/dzngksLOkQNs1RTKoCEi3TobLhHLOV",
- "8vpAixMwSF4LHB6MJkZCyWZBlS+OhTXE/FkeJANcY+L+tnItp0GIWFAorCrG4nlu+5x2tEtXtMVXavHl",
- "WULVckCpFSPhY1R6bDsERwEogxzmduH2ZU8odRGBeoMMHD/NZjnjQJJYtFlgBg2uGTcHGPn4PiHWAk8G",
- "jxAj4wBs9GfjwOSlCM8mn+8DJHdFEKgfGz3hwd8Qz9ey8ddG5BGFYeGsx6uVeg5AXYhidX+1AmVxGML4",
- "mBg2t6K5YXNO46sH6VQNQbG1VSPERVTc6xNntzhA7MWy15rsVXSZ1YQykwc6LtBtgXgq1olN2IxKvNP1",
- "1NB7NCQd00djB9PWZ7mjyFSsMUoHrxYbAr0Dln44PBiBhr9mCukVv+u7zS0w26bdLk3FqFAhyThzXkUu",
- "feLEkKl7JJg+crkblFy5FAAtY0ddv9gpvzuV1KZ40r3M61ttXJcS89k+sePfd4Siu9SDv64VpiqS8qot",
- "sUTtFM1gk2Z9mECEjBG9YRNdJ03XFaQgB1QKkoYQlZzHPKdGtwG8cd74zwLjBVahoXxzL4hgkjBnSkNt",
- "RPdxEh/DPEmx+J0Qs/7V6ULOzPpeC1FdU9aNiB82lnnjK8AQ4BmTSifogYguwbz0rUKl+lvzalxWasZI",
- "2VKxLIvzBpz2HDZJxvIyTq9u3h+em2lfVixRlVPkt4zbgJUpljaORk5umdoG125d8Au74Bf0YOsddhrM",
- "q2ZiacilOcdnci5anHcbO4gQYIw4urvWi9ItDDLIeO1yx0BuCnz8k23W185hyvzYO6N2fN5t3x1lR4qu",
- "JTAYbF0FQzeREUuYDioDd1NRe84ALQqWrVu2UDtqr8ZM9zJ4+HpqLSzg7rrBdmAgsHvGsmEkqGbpvFrA",
- "tzWeG5VrJoMwc9YscBcyhHAqpnyHgi6iqmy5Xbg6A5r/AJtfzLu4nNGH8ehqptMYrt2IO3D9qtreKJ7R",
- "NW9NaQ1PyJ4op0UhxYrmiTMw95GmFCtHmvi6t0ffMKuLmzHPvjl58cqB/2E8SnOgMqlEhd5V4XvFZ7Mq",
- "W6Wv54D4CuhG5/MyuxUlg82vSouFRumLBbhS0oE02ql5WTscgqPojNSzeITQTpOz843YJW7xkUBRuUhq",
- "8531kDS9InRFWe7tZh7anmgeXNywwqlRrhAOcGXvSuAkSw7KbjqnO346aurawZPCubYUu17aeu6KCN52",
- "oWPM86ZwXvclxYqV1irSZU68XKIlIVE5S+M2Vj5Vhji49Z2Zlwm+3COMmhFL1uOK5SULxjKvDalJ0wIy",
- "mCOKTBUti1Pjbipcr56Ss3+VQFgGXJtHEk9l66BieRNnbe9ep0Z26M7lBrYW+nr4q8gYYbXW9o2HQGwX",
- "MEJPXQfc55XK7BdaWaTMD4FLYg+Hfzhj50rc4qx39OGo2QYvLpoet7C1Tpf/GcKwNdZ39/XxyqsrG9sz",
- "R7RPD1PJTIo/IK7noXocSTTy9WkZRrn8AWGiQ9idosFiKutO3W6onr13u/ukm9AK1QxS6KF63PnALYeF",
- "Mr2FmnK71bZtRiPWLU4wYVTpkR2/JhgHcycSN6cXUxqrImqEDAPTSe0AbtjStSD+Y497VWVb2NlJ4Euu",
- "3mU2ibwAWecAdgvSXFJgsNMOFhVqyQCpNpQJxtb/lysRGabkF5Tb7ivmO3uU3NcKrPHLfHUhJJaAUHGz",
- "fwYpW9I8LjlkadfEm7E5s41FSgVB5wo3kG3aZKnIdf+ocogcak5n5ME4aJ/jdiNjK6bYNAd846F9Y0oV",
- "cvLKEFV9YpYHXC8Uvv5owOuLkmcSMr1QFrFKkEqoQ/Wmcl5NQV8AcPIA33v4JbmLbjvFVnDPYNHdz6Pj",
- "h1+i0dX+8SB2AbjGMNu4SYbs5O+OncTpGP2WdgzDuN2ok2i2vO0M18+4tpwm++mQs4RvOl63+ywtKadz",
- "iEeKLHfAZL/F3URDWgsvPLNtjZSWYkOYjs8Pmhr+1BN9btifBYOkYrlkeumcO0osDT3VbSnspH442yPJ",
- "VRT2cPmH6CMtvIuopUTerNHU3m+xVaMn+yVdQhOtY0Jt3Y+c1dELvs45OfVlhbDEclVZ2eLGzGWWjmIO",
- "BjPMSCEZ16hYlHqW/I2kCyppatjfpA/cZPrFk0hZ6WZ5U74f4DeOdwkK5CqOetlD9l6GcN+Su1zwZGk4",
- "SnavzvYITmWvMzfutuvzHW4feqhQZkZJesmtbJAbDTj1lQiPbxnwiqRYrWcvetx7ZTdOmaWMkwctzQ79",
- "/PqFkzKWQsZqBdbH3UkcErRksMLYvfgmmTGvuBcyH7QLV4H+43oevMgZiGX+LMcUga9FRDv1pc4rS7qL",
- "VY9YB/qOqXlgyGDqhhqTZlnpm+ejh4mCinu6vGG769gyTzwe8I82Ij4yueAG1r58u5IeQgnK6kdJJque",
- "Bz52Sr4W66GE0zqFnng+ARRFUVKyPPulzvxsdS2QlKeLqM9saj78re6vVi3O3oHRsn8Lyjnk0eGsvPmb",
- "l0sjkvM/xdB5lowPfLfdSMEut7W4GvAmmB4oP6FBL9O5mSDEajOprgrazuciIzhPXWOuPq7dBhxBmfR/",
- "laB0LEEJH9jAMbSNGnZgq3QT4BlqpBPynW2hvADSKCCEmqCvFNHMmi6LXNBsjBUszr45eUHsrPYb2yXI",
- "VgmfoyLUXEXLJhaUzxwWguwb/sTTI4aPsz1e26xa6aQq6h1LQDVv1GXHWctPgCpSiJ0JeR40Q7W5qmYI",
- "Qw8zJpdGq6tGs/IR0oT5j9Y0XaDa12Ct/SQ/vLy9p0oVtJSsWkNVNSXx3Bm4XYV7W+B+TITRzS+Ysp1z",
- "YQXNnNcqAdyZHXwObHN5suTcUspkj1uuqiC5L9o9cPaK9K6EKGQtxO8p9NvuEPtW+3+DX0VLXLVbB3R6",
- "SdoMyqrlj++InlIuOEuxwFTsinYtdof42QbU4mobcv0Rdyc0criiDQuqUDyHxd4WBp4ROsR1Df3BU7Op",
- "ljrsnxp7uS6oJnPQynE2yMa+74azNTKuwNUIxYbMAZ8UsuG7RA4ZdYcnldtkTzLC1Jse5fFb8+ylMy1g",
- "TPo546hEOLQ5wc9aA7EDqDaaB9NkLkC59TTzj9Wv5psJpuJmsH438R1DcQzr+jPLtn7u7lAn3uvtvMzm",
- "3WfmXVcgqfq5EeVsJz0pCjdpf1eWqDyg17wXwRHvZeLdRwFyq/HD0baQ29ZwFbxPDaHBCp3dUOA93CGM",
- "qkNJq/uVEVotReEbxIaJRaskMB4B4wXjUPezjVwQafRKwI3B89rznUol1VYEHMTTzoDm6OGOMTSlnXvj",
- "qkO1y0MZlOAa/Rz921g3V+lhHNULteBG+aZqo2uoOxAmnmH/bofIbqsUlKqcEJVh1kKreUqMcRjG7dsz",
- "NS+A7jHoykT2cy2pPTn73ER9iajTMpuDTmiWxUq2fo1PCT4lWYmSA6whLavSnkVBUqy70ixE06U2N1Eq",
- "uCqXW+byL1xxuqAbUYQawo5Ifocx0WW6wX9jdS37d8YFeuwdauijOrL9qi91QydjUq+h6USxeTIcE3in",
- "XB0d9dSXI/T6+4NSei7mTUBuuPzENi4X7lGMv31jLo6wOkOnWKu9WqriCRjYJ3wPSVQbq7TfJlfCq6xT",
- "vRUdSlWPuu0GiP5uc2O8/HrCe4OiG9Ter9ZD2Rfkm/bGpFPtsuM0JVtZUG/GkY0QsrlFCEXcOtsXFWSD",
- "gszjztfDJMOOnK3jhQ8DhPpwsy5AP/hYVlJQ5tzvNbPoYtZFvXfzEIbEw9Yb3F6EiyXvtdj9sOqL+/bF",
- "2PB5uxvVObiU+ULCionSO7Z95JNXCe2vjd5OVeR9dP1dwytO9XHNob3G2zPXFcAu0+nkP/xi4+QIcC03",
- "n4Apt7PpnT5XXWnXmqfqV0hVUHpQgenGrTikUGGsJp6TDRudtnb0CeuQ1fMh4kC379d4dJrtdWHG6iqO",
- "7CixYxfv4tVfdqouNYVHrBCK1XXdY+29BoYYnmGHrqBsVncsH9+zglRjMf86bkEC7FNEy0wWNAy9LT/V",
- "o05XkZiu6tS2UlPdCv477vhONliQ0Wirn0+GF1Y6qaLTkE9jNeQ5cNezs5nnMTjafDaDVLPVjuy7vy+A",
- "B5ldY2+Xsb23g2Q8VkUvY/GW/a2ONUDbkuO2whMUUbwyOH25N+ewuaNIgxqi5djH/qq9TN0OxAByh8SQ",
- "iFCx6A9rSHYOeaYqykAs+Ggr+znUFdB6OzkFuaSXnMuTpLk46vzSLVPGW8kMmst8ulfWNQbi9iXodTtR",
- "9Osfz7Hxh6q6LPq6H6GWTk671REvXN0QzJWsfCe+gggo/5tPjLaz5Owcwl5T6Km6oDLzb0RNL96qk2y5",
- "jzpZdb6LQhvoWTUzq2Nju3lUkXpbGAGd5sKIEUlfGHkzHLWK5bijbNCNLf+OgbYGrhlI15MP5d9cKEi0",
- "8LG02+DYhgobWXQpJKjeGpcWuN7KM6/r0jpY65dipRnqAorCBRIJS2qgk0EBnP45tyH7mX3uE4d8rded",
- "FqaKXnc3HfBR0Ux1kBhS/Yy423J3QtJljE2Mc9v3WcWq4XCQTW9IIUVWpvaCDg9GZZAbXGtqCyuJ2mnS",
- "7ipbOkKQ1XkOmyOrBPluDX4HQ6Ct5GRBD6ootDb5oOY3FYN7fhDwPqblajwqhMiTHmfHabeET5viz1l6",
- "DhkxN4WPHuzpfEPuoo298mZfLDa+ZE1RAIfs3oSQE27jtb1ju1lDujU5v6O3zb/GWbPSVtVyRrXJWx4P",
- "fMV6V/KK3MwPs52HKTCs7opT2UF2FIhZ95QPkvQi0gdqMlQr77qa2715aqKyUMRkkrrtzI44mSpEpu78",
- "UYfJdKWDPBcXCVJRUtX/iukc5r0mk/QVT+vPDLanEMTbUOUu0A1Z0IykQkpIwy/iKQ4WqKWQkOQCw29i",
- "nsGZNvLQEuOaOcnFnIjCqLm2jJ73oUTb0gRz2TRb+2ViHTU9hQxAubRaN419uTvPlu41+3fGOVtE7C2I",
- "aI/lvdvfOELZu2tFAOYAAt1tazqJdfdprqvdH6qvW5sWS5bG0f15RZn0xobs6F0UWV9Fjq61ks8K7MFV",
- "1GW73UNq+9BNh/pJq5rJA49FAEC/57QBwyD/6b5gzLCvY0IjSD6tpNZxo+0ua519X8/O0nhKrda6AGLG",
- "LiW4LDXbgK7VOaegeuFvMfN6V7c0egooTCGz7T+ospYQb5Fx3e/a4oEokhxW0HAou9S5Mk1BKbaCsHOe",
- "/ZhkAAXaJ9tSc8xTGnK5lijl1p4EvrYh2I3KVhaxdqfIDsEpKuateWKPiRp6lAxEK5aVtIE/dYVeZH1t",
- "yCJs2MM6kFPszSTii9vGInbGNiDNR88lj4c2hJmblVEEZ8sq46klwvpkq4Je8H4lImJ3qvztV18HwcGI",
- "amVS9175stqVyyqQvZSxjTA6/QOjMocC3/81LHrixS33bUTGsqYupiIDMFWfZ4zegzo6LHhtSTckY7MZ",
- "SGvMV5ryjMosfJ1xkoLUlBnNZqMuL9YaaGUJ452SreGuOKhnMDEZF+1SFpB841SGK0id6LmJSJz2qtWi",
- "r0ViZ1fi6QR0baRrjKvqIQKXCI2ytT1ggqOARJb0HPacR7E/YPs0WJ7E2f60wFmHTBHztV6yttog1t0N",
- "Q4jcbkEzxO2eobD0Yp3TJW00C1qS/QXZpvEf64tzWFtG/8EO8EKHYdCY0dtuHDgfOTnqxwopwVLe9VFC",
- "Y/m7fJBugbWkEWyRYwRagy2EawPqm/sSOJjVs8pv29dDtO3exTqLgtsmfx23sOVNtmtfQDjmLMgVzW/e",
- "tYsFOE8QH5C97jcGh77BEMkWlepymQkv6KC5Az/g4abmr9AV/XcwexTVSt1QToSpxHofzIM3C82t4WLm",
- "W3itgJMLHNPGsT38gkxd5nYhIWWqLRpd+O4alSsMm025bJC13uF727XOX4S+AhnPvKZBXtaV+lHHn/Ma",
- "wvqIfmSm0nNyo1Qeo74OWUTwF+NRYQm1HdfFeSPAzXY+aWVuCAkHDnQLQtb3DHTrFocbujwbzGUunVJB",
- "d52Db+sGbiMXdb22oVGaXeRuK+c+JLgy3qXBfI7RnRYh2OKEIKjk94e/Ewkz7GEoyP37OMH9+2P36u+P",
- "mo/Ncb5/Pyqd3Vhcp8WRG8PNG6OYX/oy/Ww2W09SaWs/SpZnuwijkSJcdwHFJNjfXCGCj9KH9Dcba9I9",
- "qq4X3BUC5CxiImttTB5MFST/Dsj7dZ9FsnzRj5OWkukN1kf09gP2WzQC9bsqmslFw1X6obv7tDiHqsJm",
- "HftUKn+7fidojveRVVu5uYVEPiHfrOmyyMEdlK/uTP8THv/tSfbg8cP/nP7twdMHKTx5+uWDB/TLJ/Th",
- "l48fwqO/PX3yAB7Ovvhy+ih79OTR9MmjJ188/TJ9/OTh9MkXX/7nHcOHDMgW0JGvxjP6H2zWm5y8Ok3O",
- "DLA1TmjBfoCN7QtoyNh3HKQpnkRYUpaPjv1P/8efsEkqlvXw/teRK/YxWmhdqOOjo4uLi0n4ydEcgx0S",
- "Lcp0ceTn6bQkPHl1WnmJrBUId9TmyXrrnieFE3z2+ps3Z+Tk1ekk6Fd/PHoweTB5iO3NC+C0YKPj0WP8",
- "CU/PAvf9yBHb6Pj9h/HoaAE0x9hA88cStGSpfySBZhv3f3VB53OQE9eG0fy0enTkxYqj9y7o44OZIapP",
- "2xTxIC+4253QBZChMcqmgDe6/SjXfGZc9YByxkaeYeaujaMwbK5C3GlWNzs4rZmWL/loa2Af/xoJxJ2x",
- "OZoefCXCRodI1yCOKfLfb356SYQkTr15RdPzym9BTme2fJcUK4YJoVmQRWy+nHj6/VcJclPTl+N8YX1n",
- "39LHOUCWal40c9JqqSrmo4l1gsSZDVkEhF2FaNWMC000Yf/cig0b1vog+fLd+6d/+zAaAAjGCyrA6l+/",
- "0zz/nVwwbCiI9kVfP9PVRxtH2tegND2uQ37wg3onx5hUVz0NOxRW7zRTuX/ngsPvfdvgAIvuA81z86Lg",
- "ENuDd1ifCokFz9yjBw8O1tq0ql5gvTTVKJ4kLjFQlyHZR1WL1AtJC3sWfYdTdHSjKuwXig1dnxxwoc3c",
- "oysvtz1cZ9Ff0wy7xoHSdikPP9ulnHIM2TUXBLEX4Ifx6OlnvDen3PAcmhN8Myj+2L1ofubnXFxw/6YR",
- "fsrlksoNijZBa8tWZRQ6VxgPgizSnu1GM7vRuw+9t95R2Kvr6H0j6jO70p3YaVN4+nzHNXlH9XHObun0",
- "Visw87zq9IRxga7fGfaeUvcm5Lvwa+TeWInM1vkqJYfMB236W68qreoLttaw3VFhkbbopR2Yi2/v7499",
- "f580jR2N8twxYBqnYCtMHa/CVS/Qrqu01ez5Us2Ug6Zcl2htcq0dJ1u6pp3pXUwV3Mmob3HXg7s+MSmA",
- "t5KYms3Urp81+yTB6iZpXBnXyLg/c6HvR5obOgmW2yrGY2vW3wqDfxlhsEommlvpzLVpuZp4iA0bj977",
- "PgQHEAldH4YBwmCoVgffBlEfd1vs5N7ENhUI37kcz3DZQzvFPOwOcSvgfQICXrfzSgyMup/GxxPqEIZF",
- "3ZplZxcY31Sl3Ut/rxYyn6kU9xdGVq/YZiDdLbBdgn12hDHHrK+Nrf4phTCHtFvx6y8tflU5vVcSwBq9",
- "k1yWeODGupL1rm2dY7qSxJp53QFnw5Bqw1DcER7XfR4Ni8GymL4imhp7zRDdqVZptJs17uiNXRHrOwgV",
- "1K83p893SVefkZ1ncHnmyC0Q35vr5qVRt8Prm3E7DONNTx48uTkIwl14KTT5Fm/xa+aQ18rS4mS1Lwvb",
- "xpGOprY5xTauxFtsCRlF3XQi4FFVQYtx8Ny8baM07rrO6mFBr3sT4lthqKqxl0vsmgvDqHxOCZVz+5Hh",
- "dQYZ5I7/8xjHvzMh32JCj1ZjDDbTrusTucO4Pn746PET94qkFzaWq/3e9IsnxydffeVeqxufWD2n87rS",
- "8ngBeS7cB+6O6I5rHhz/zz/+dzKZ3NnJVsX6681LWwH4U+GtXfUuJIC+3frMNymmrfteHrtQdyPu+6/F",
- "OnoLiPXtLfTRbiGD/T/F7TNtkpFTRCtLZqNG0AFvI3tM9rmPxr7Jh+E71WUyIS+FK9dW5lQSITOQrhPi",
- "vKSScg2QTTylYtKpsuWp0pwB10ZxxN5uMlEsA1vlZl5KqNLnCgkrjJHH6VGnb0Cwm9FjJO0ny+R/pOug",
- "hNO0uqa1cEtGs+eSrn13SeyfJiT+9NVX5MG41l7y3AyQVIiJMdclXY9u0OpXEdug+PNmc6adAbo49hAL",
- "Ui39VDm0YSeYvzbn/mwld0vubmMPxDn3dvzUjp3QjuCKom21IFjBzvaexGaImzob30h5XoSKszgzw1Dj",
- "wCfsI9hpmo4qoW303h7iWyPAlVhJm6D2ZBuYdaqO3qNeHvKMzrnFrLm/lrs08B1JsfTOI0FmoNOFS9ht",
- "oT7CnnxrqH7etK35+KGlGtzFbtWLsCY1NsUeWPYsyKVEBx7ICBH/5Ls0mMdsZgvM+IJMvsc+uqaYbztb",
- "dZx1fbldqQktqrxes4t7QfmsnrwrkCFaDuH/vEXwfgjuMMdvfOtRxJhbxJ8h4t+rkgl5Keq0cdf16s/o",
- "erzOm/26F/RScLA+diP5Wlq8dadWYodhHBYpvl6I1V+q/iOXFkGOfBPdrXLI97aF7VZZZMjtbSb7LK/w",
- "7x2WttwyZm2TncUQ6tGGMGfzoq2C1eyI8RG1mI/CTz9B1eZjcKybYTF4SD2fcWIBPyzTwRI8lpiPqmYI",
- "fRwo3l9mMDfSogpDi7aEmUIu+Fx9mqxoa6efKF4iVFJ13om31/nrnd1nWN3HqLw2AtLVe1KMp2CbRGN/",
- "O6bIkinlgiWfPPjbzUGo2dJXFOdh7upH5i5PHzy+uenfgFyxFMgZLAshqWT5hvzMq4beV+F22Dyoqr/m",
- "rcHRflHobWrWBUvDIkaXZ4KN0LX3es2yD7uZYVCxck8+yHjAB8PyhrQogMrLM8Ddrqt2ue3T52F0cKOn",
- "TVVRKwKKQdGeAfL/MRpod8K0dzFzl1/JLaC++pdjEy50V8zGVXCMkQLE7Ji85feJWtCnDx/99ujpF/7P",
- "R0+/6LGcmXlc0Z6u7aweyDy2wwwxoH3W5sDDSu0Vfo9verf328TxiGXraNeLuo9dp+i1E8vuKFLQTW9r",
- "nGJHH75w2Lon380XO1SaTRdR/cqrP1VZ+VP+daUF24p8rn3dbf+9nuSJgM8YQqsb8VVY396Tb4s02SLL",
- "qvnZTSundZKBveg88mTrzvmogq7+WEpqgjoqcC/YNNHy8WRK7MwyDtzdhRRapCK3sStlUQipq9OtJoPE",
- "Pehz2zWkvT7C3UuYS6lOF2Vx9B7/gxW+PtSJB7YB+5Fe8yOsHn30fmuIAIIYaQ1r5dJoS4WumjygM+2u",
- "EIDWiRm3D5GthI2xBBH57Hqks7+0ULNfp9+rmrQjI3YOcJVXF3TRrmg3KPy9o7nx5NYF84ktqDaKzBjP",
- "CA22saW7CVkzgms2jFz3oj+GneXm/U5PP+Nz9lJocrosbMMcyK4WvUPaHM7fHluv2/0EA3f1d0N8und+",
- "eOP7wMTKur7zgt/DIRekYoOfjkrMjTZ39fXYvm9v8k/7Jn/mSw43yPD2Xv587mXpwylvr+BP/wp+/Nmu",
- "5hodMQOvZH8TXfoarjXxPS/kSJdQNBm0XOHb/DSoerdXqb4V0re3uL3FP1Mng93JwUlLQyw0u1KZ3JSH",
- "CJ39pKAfZmfI84iloe+gjm2vH70AhkVnRMqwfvhppsb2EDvjhDvFt4LPJy34BHt9K/fcmh4+M9NDj5Tj",
- "tP5mk9Y+QWNfAWi1FBn4qBMxm7kib33ST7P3jCFPpemyIPbLqJSD3tgztoQ35s2f7BQHvWJrsFtiUQs8",
- "gywFqeCZGuAVdaNe9h5CN24/ADfuAa12wMPi0r8nlybZ10ENmQ4lkDbyFfYM8sXuHDIyWBFDgJMDkO3R",
- "e/svmtMKoWJdlz0BdzbmrtsWW73PjtsAkLxCIdSWAfRfiRl5YIv4lRwzdermgJRnRMuNEVR9zRIJNCdp",
- "I0K/gqN7ct70npydqkBndT1riusCoj6hhwxnbWVH/XDjB+AZ5Y7kuwjSglDCYU41W4GPW5/cZtRf+jZz",
- "+exbGOCY0Cyzp7HeBFiB3BBVTpWRdXgz0PKOap6XPRgGrAuQzFzRNK8d8FZNOLLp8tsCKt/YN654abV4",
- "kU3Sl80oIH+zuhR+MSM/slSKk3wulI/rUhulYdlpvec+/a2n6Ko3JHRjwATPGYdkKXisIdxP+PRHfBjt",
- "py40zfs+PjMP+75t3bdN+FtgNecZcidfFb+fyOm/Uq5Ga7USCiG1b9IPTk3e8yj5Q7PhafckbXgaOLXc",
- "w2CgsH1c4+ej940/XbEM96ZalDoTF8G3qNnboJ8hefJBo+pLWNJaDZ/V9drSrtOHFOAhdmKqp5HWX0E7",
- "8t7uX3/R/BDncgmJBEM3U7ECqVrq2W2SyJ8qSWTwvu/FY22ry10crVSHlUheigzsuM1Os7H6zFxk4Dpy",
- "dgWRKtgxHljvb6X6vVaoc0rL+UKTsiBaxIKq6w8Tmlomm1j1Jj5hUBHNKkE43YKugNAc+5ySKQAnYmoW",
- "Xd+PuEiqsCadj8x2IZ1RUSiAq5AiBaUgS3w96l2gVX1OMY5bb8ETAo4AV7MQJciMyisDe77aCWfVJ1yR",
- "uz/8YhTmG4fXioLbEWsrYUXQW1XbcNJeF+ph028juPbkIdlRCcSLBphIIpZFDi6VJILCvXDSu39tiDq7",
- "eHW0YK4Fu2aK95NcjYAqUK+Z3q8KbVkk5v7ugvjMPj1jS5TEOOXC2xVjg+VU6WQXWzYvhWtRZgUBJ4xx",
- "Yhy4R+F8QZV+7bIKM6xAY68TnMfK2GaKfoBXff3ozci/VN3oO2On5j7kqlRVy3qXKQBZbA0c1lvmegnr",
- "ai5M6/RjV6kI1sK3a+Q+LAXjO2QFRbkJ1YE33wwXWRzaH6kzUHRR2QCiRsQ2QN74twLshm78HkCYqhFt",
- "CQeLjIaUMxUiB8ptRpcoCsMtdFLy6rs+NL2xb5/on+t3u8RFdX1vZwJUmCbiIL+wmFVooF1QRRwcZEnP",
- "XSbJ3DVZ6sJsDmOCGeDJNspHk615KzwCOw9pWcwlzSDJIKcRU8rP9jGxj7cNgDvuyTNZCQ3JFGZCQnzT",
- "a0qWvSaiamiB46mY8EjwCUnNETTKc00g7usdI2eAY8eYk6OjO9VQOFd0i/x4uGy71T1mKTOG2XFHDwiy",
- "4+hDAO7BQzX05VGBHye1+aA9xT9AuQkqOWL/STag+pZQj7/XAtrmvPACa9wULfbe4sBRttnLxnbwkb4j",
- "GzMgfpbG/nbs0jVWf2kaUAMFcHIZ5fbogjKdzIS0gnRCZxrkzoD4v1Pm3eHONaCFq01AcAR3b7pxkMmH",
- "rS4cF7EgEHddGBLp+t/MVN8KOajEZrOQDGWalFyzPCgzXqnKn57B8NYIcGsEuDUC3BoBbo0At0aAWyPA",
- "rRHg1ghwawS4NQLcGgH+ukaAj1U0N/EShy8lxgVP2lGJ5DYq8U9VZLK6q7xRAs0YF5Rp1zXT5/u7J1er",
- "sauB5ogDlkN/nLQN3zz75uQFUaKUKZDUQMg4KXJqdANY66qHW7M7qO9bbBtB2sajVMHjR+TN9ye+Ft7C",
- "1Wxrvnv3xPX/VnqTwz3XJQF4ZkVR3y4BuEG665ZA/Z3ge725zncsxxhzRb7Bt5/DCnJRgLRltoiWZcTk",
- "cwY0f+Zws8Pi83czuQta/d2M9vu4YWhyaFvSwsv5fq1UEWpzF8nzIJvx9xnNFfzel9Box1vSItZurbr5",
- "rC0IucnXItu0TojZtSPcwObZqCviMU7lJlJvqZtM0CYNLQy/coTVNWZ9OHjdxi7RdslsF4XFxHUJKnqO",
- "t1F5tGBhtWGdoWzK66xFJ6NYtma7St+oAnBICOwZJhzYPSGv7Xcftyo8QuSOWM3MP5nIweabFdPAd40W",
- "4VjP5xqV7xEfPb149seGsLMyBcK0Ir704+7rZTxaJ2akOfDEMaBkKrJN0mBfo8YtlDFFlYLldPdNFPJP",
- "12DYXT7myfZ76uNcI8+DxW3jySHRrBPHgHu480bDYN5cYQtHdOw5wPh1s+g+NhqCQBx/ilmVWrxvX6ZX",
- "T7O5ZXy3jC84jS2JgHFXKrfNRCbXyPjkRpa8n+d9s4a0NMCFJ/kumufRJwdr3XBsZjAt53NslNxx0pml",
- "AY7HBP9IrNAudygX3I+C7OBV88yrpnu3h+tylyAD+66vcXgPt4PyDXozlgXlG+/zhUSxZZlbHNoec4dl",
- "tLaabTcSAP2xzvjXZ9Z+5W1+gfHWXbXN3y1ayAVVxO4vZKTkmcsd6tS8XvPhFUPs0GdrXrPprdVB7Hoj",
- "q3PzDrki/C43k7YVKUAmes3tgWp2Ure1te3Jndw2iP1rXBs25Rt6GGy3TnTNEA50e8iAr+H1EXQDqZPh",
- "Gj1C0GrRnzoStgaxbx40eqQzfDOIpDapOCcp5AWhvnt/KrjSskz1W07RSRMsbNINMPHW6H7+9sy/EvcT",
- "Rtx4bqi3nGJz98p1E+VzM4j4Kb4F8GxUlfM5KMMrQyKZAbzl7i3GScmNpiVmZMlSKRKbiGrOkJFPJvbN",
- "Jd2QGdb/EOQPkIJMzc0e7Lo1GCvN8txFtJhpiJi95VSTHKjS5EdmuKwZzhcfqEK5QF8IeV5hId4pYg4c",
- "FFNJ3PjynX2KzRjc8r2RDw2W9nFdRP1muzB42FnWC/npcwM3xdrFOVO6DoLowH5jDvAl40mUyM4WQFxM",
- "WJu2yF2smOYI6F7TO6QX8JabG04Lglyd6suRQ9vN0zmL9nS0qKaxES1vkF/rIBXvIFyGRJjMrWvlT5Sa",
- "GdCBd1/ixttq9K2939ON0rhygWfmac+FbJ+65l09LzkloWEIa5WDcW+cNUD+8zZ+f3c9+qJH48E0xu6A",
- "XXbVbM+EePMbPiY0F3xuqxAaDVLgPjFelBoDq6/TSAcrmidiBVKyDNTAlTLBv1nR/Kfqsx0XYNBcbrmE",
- "jFEN+YYUElLIbJ0spkitJE9spQGSLiif410pRTlf2NfsOBcgoerDZfTS9hDxOiVrntiaaV0YT4g1MIZl",
- "ZYGmi0hfE7xRjCLsd9CWgRii6kaOMFbE7NN8x6NeydYgdVUHpFnkNM/1gGu7cQEH+KknPkQJ0Vsqu6Wy",
- "vaksVmIPUTdr6dwWX+G2XLNx5roLSt6greejVJu9Ldn+Zy/Z7jmQIpRI2pCy473CqCJMkwssqDMFYi6M",
- "Em3MrqW400gnxDCkwJ5uKy8q1+kyXVDGXTWWKjwf4dCuG6/27f+uxTxnmRna5Qw6IC0l0xuUy2nBfjsH",
- "8/93RrBVIFdeZC9lPjoeLbQujo+OcpHSfCGUPhp9GIfPVOvhuwr+917aLiRbGQ3iw7sP/z8AAP//R7K1",
- "ReZ0AQA=",
+ "H4sIAAAAAAAC/+y9e3PbtrY4+lUw+v1m8jiinFd7dj3TOddN2m6fpmkmdrvPPk1uC5FLErYpgBsAZam5",
+ "+e53sACQIAlKlC07Seu/EosksLCwsF5Yj/ejVCwLwYFrNTp+PyqopEvQIPEvmqai5DphmfkrA5VKVmgm",
+ "+OjYPyNKS8bno/GImV8Lqhej8YjTJdTvmO/HIwn/LpmEbHSsZQnjkUoXsKRmYL0pzNvVSOtkLhI3xIkd",
+ "4vTF6MOWBzTLJCjVhfInnm8I42leZkC0pFzR1DxS5JLpBdELpoj7mDBOBAciZkQvGi+TGYM8UxO/yH+X",
+ "IDfBKt3k/Uv6UIOYSJFDF87nYjllHDxUUAFVbQjRgmQww5cWVBMzg4HVv6gFUUBluiAzIXeAaoEI4QVe",
+ "LkfHv44U8Awk7lYKbIX/nUmAPyDRVM5Bj96NY4ubaZCJZsvI0k4d9iWoMteK4Lu4xjlbASfmqwn5sVSa",
+ "TIFQTt5895w8ffr0K7OQJdUaMkdkvauqZw/XZD8fHY8yqsE/7tIazedCUp4l1ftvvnuO85+5BQ59iyoF",
+ "8cNyYp6Q0xd9C/AfRkiIcQ1z3IcG9ZsvIoei/nkKMyFh4J7Ylw+6KeH8H3VXUqrTRSEY15F9IfiU2MdR",
+ "HhZ8vo2HVQA03i8MpqQZ9NdHyVfv3j8eP3704f/8epL8r/vzi6cfBi7/eTXuDgxEX0xLKYGnm2QugeJp",
+ "WVDexccbRw9qIco8Iwu6ws2nS2T17ltivrWsc0Xz0tAJS6U4yedCEerIKIMZLXNN/MSk5LlhU2Y0R+2E",
+ "KVJIsWIZZGPDfS8XLF2QlCo7BL5HLlmeGxosFWR9tBZf3ZbD9CFEiYHrSvjABX26yKjXtQMTsEZukKS5",
+ "UJBosUM8eYlDeUZCgVLLKrWfsCLnCyA4uXlghS3ijhuazvMN0bivGaGKUOJF05iwGdmIklzi5uTsAr93",
+ "qzFYWxKDNNychhw1h7cPfR1kRJA3FSIHyhF5/tx1UcZnbF5KUORyAXrhZJ4EVQiugIjpvyDVZtv/++yn",
+ "V0RI8iMoRefwmqYXBHgqMsgm5HRGuNABaThaQhyaL/vW4eCKCfl/KWFoYqnmBU0v4hI9Z0sWWdWPdM2W",
+ "5ZLwcjkFabbUixAtiARdSt4HkB1xByku6bo76bkseYr7X0/b0OUMtTFV5HSDCFvS9dePxg4cRWiekwJ4",
+ "xvic6DXv1ePM3LvBS6QoeTZAzdFmTwPBqgpI2YxBRqpRtkDiptkFD+P7wVMrXwE4fpBecKpZdoDDYR2h",
+ "GXO6zRNS0DkEJDMhPzvmhk+1uABeETqZbvBRIWHFRKmqj3pgxKm3a+BcaEgKCTMWobEzhw7DYOw7jgMv",
+ "nQ6UCq4p45AZ5oxACw2WWfXCFEy43d7pSvEpVfDlsz4ZXz8duPsz0d71rTs+aLfxpcQeyYjoNE/dgY1r",
+ "Vo3vB9iH4dyKzRP7c2cj2fzcSJsZy1ES/cvsn0dDqZAJNBDhZZNic051KeH4LX9o/iIJOdOUZ1Rm5pel",
+ "/enHMtfsjM3NT7n96aWYs/SMzXuQWcEaNbjws6X9x4wXZ8d6HbUrXgpxURbhgtKG4TrdkNMXfZtsx9yX",
+ "ME8qazc0PM7X3hjZ9wu9rjayB8he3BXUvHgBGwkGWprO8J/1DOmJzuQf5p+iyM3XupjFUGvo2IlkdB84",
+ "t8JJUeQspQaJb9xj89QwAbCGBK3fOEKBevw+ALGQogCpmR2UFkWSi5TmidJU40j/V8JsdDz6P0e1/+XI",
+ "fq6Ogslfmq/O8COjslo1KKFFsccYr43qo7YwC8Og8RGyCcv2UGli3G6iISVmWHAOK8r1pDZZGvygOsC/",
+ "uplqfFttx+K7ZYL1IpzYF6egrAZsX7ynSIB6gmgliFZUSOe5mFY/3D8pihqD+PykKCw+UHsEhooZrJnS",
+ "6gEun9YnKZzn9MWEfB+Ojaq44PnGCAerahjZMHNSy0mxyrfk1lCPeE8R3E4hJ2ZrPBqMmn8IikOzYiFy",
+ "o/XspBXz8t/duyGZmd8Hffx5kFiI237iQkPLYc7aOPhLYNzcb1FOl3Ccu2dCTtrfXo1szChxgrkSrWzd",
+ "TzvuFjxWKLyUtLAAuidWljKORpp9ycJ6TW46kNFFYQ7OcEBrCNWVz9rO8xCFBEmhBcM3uUgv/k7V4gBn",
+ "furH6h4/nIYsgGYgyYKqxWQU0zLC41WPNuSImRfRwCfTYKpJtcRDLW/H0jKqabA0B29cLbGox++Q6YGM",
+ "2C4/4X9oTsxjc7YN67fDTsg5MjBlj7O7ZMiMtW8NBDuTeQG9EIIsrYFPjNW9F5TP68nj+zRoj761PgW3",
+ "Q24RuENiffBj8I1Yx2D4Rqw7R0CsQR2CPsw4qEZqWKoB8L1wkAncf4c+KiXddJGMYw9BslmgUV0VngYe",
+ "SnwzS+2cPZkKeTXu02IrnNQuZ0LNqAHzHbeQhK+WReJIMeK2si+0Bqpv+bYzjfbwMYw1sHCm6Q1gQZlR",
+ "D4GF5kCHxoJYFiyHA5D+Isr0p1TB0yfk7O8nXzx+8tuTL740JFlIMZd0SaYbDYrcd7YZUXqTw4PuytA6",
+ "KnMdH/3LZ95R2Rw3No4SpUxhSYvuUNYBalUg+xox73Wx1kQzrroCcMjhPAfDyS3aifXtG9BeMGU0rOX0",
+ "IJvRh7CsniUjDpIMdhLTvsurp9mES5QbWR7ClAUphYz41/CIaZGKPFmBVExEblNeuzeIe8Ort0X7dwst",
+ "uaSKmLnR9VtyVCgilKXXfDjft0Ofr3mNm62c3643sjo375B9aSLfexIVKUAmes1JBtNy3rCEZlIsCSUZ",
+ "fogy+nvQqAqcsyWcabosfprNDmMqChwoYrKxJSgzE7FvGL1eQSq4jYTYYZ25UYegp40Y76LT/QA4jJxt",
+ "eIp+xkMc237Ddck4XnqoDU8DK9bAmEM2b5Dl9a3VPnTYqe6pCDgGHS/xMTo6XkCu6XdCnteewO+lKIuD",
+ "K3ntOYcuh7rFOFdKZr71NjTj87wZfTM3sE9ia/woC3ruj69bA0KPFPmSzRc6MCteSyFmh4cxNksMUHxg",
+ "jbLcfNM1zV6JzDATXaoDqGD1YDWHM3Qb8jU6FaUmlHCRAW5+qeLKWU+8Bl4U4/22DvU9vbB21hQMdaW0",
+ "NKstC4K3tx15UX+Y0NSe0ARRo3rurqpLR/uWnc7GAuQSaLYhUwBOxNRdELmrK1wkxatn7dUbpxpG+EUD",
+ "rkKKFJSCLHGOqZ2g+fes6NBb8ISAI8DVLEQJMqPy2sBerHbCeQGbBAMlFLn/wy/qwUeAVwtN8x2IxXdi",
+ "6K3MfHcL2IV62PTbCK49eUh2VALxcoVogdpsDhr6ULgXTnr3rw1RZxevj5YVSLyPu1GK95Ncj4AqUG+Y",
+ "3q8LbVn0hP8589ZoeGbDOOXCK1axwXKqdLKLLZuXGja4WUHACWOcGAfuUbxeUqXtHTLjGbq+rDjBeawS",
+ "ZqboB7jXDDEj/+ItkO7YqZGDXJWqMkdUWRRCashia+Cw3jLXK1hXc4lZMHZl82hBSgW7Ru7DUjC+Q5Zd",
+ "iUUQ1dVViwuy6C4OLySMnN9EUdkAokbENkDO/FsBdsMQqB5AmKoRbQmHqRblVHFX45HSoigMt9BJyavv",
+ "+tB0Zt8+0T/X73aJi+pabmcCFEZeufcd5JcWszb4bUEVcXCQJb0wuge6QexldxdmcxgTxXgKyTbKRxPP",
+ "vBUegZ2HtCzmkmaQZJDTTXfQn+1jYh9vGwB3vDZ3hYbERjHFN72mZB80smVogeOpmPJI8AlJzRE0pkBN",
+ "IO7rHSNngGPHmJOjo3vVUDhXdIv8eLhsu9WREVEaroQ2O+7oAUF2HH0IwD14qIa+Oirw46S2PdtT/BOU",
+ "m6DSI/afZAOqbwn1+HstoMeH6gLEg/PSYu8tDhxlm71sbAcf6TuyPQ7d11RqlrICbZ0fYHNw0689QfSa",
+ "kWSgKcshI8EDawYW4ffExt+0x7yaKTjI99YFv+N8iywnZwpVnibwF7BBm/u1DewMXB2HsGUjoxr5RDlB",
+ "QH24mFHBw1dgTVOdb4yiphewIZcggahyumRa24DtpqmrRZGEA0TvNbbM6C7xbFCk34Eht4pnOFSwvO5W",
+ "jEfWJtgO33nLMGigw9kChRD5AA9ZBxlRCAbFe5BCmF1nLnbcRw97SmoA6Zg23uBW4v+eaqAZV0D+KUqS",
+ "Uo4mV6mh0mmEREUBFUgzg1HBqjldZEeNIchhCdaSxCcPH7YX/vCh23OmyAwufcKFebGNjocP0Y/zWijd",
+ "OFwH8Iea43YaER944WMEn7NC2jxld2SBG3nITr5uDV7dEpkzpZQjXLP8azOA1slcD1l7SCPDoipw3EF3",
+ "OcHQsXXjvp+xZZlTfYhbK1jRPBErkJJlsJOTu4mZ4N+uaP5T9Rkmk0BqaDSFJMUUiIFjwbn5xmZN7LIN",
+ "62gytlxCxqiGfEMKCSnYKH+j8qkKxgmx8X/pgvI5avpSlHMXgGbHQU5dKutTkSXvDBHVhvSaJ+idjnFu",
+ "F3TsEz2MHgTU2GJt17a1PC5pNZ/L7RkiUgPktV390dut8ajXVDVIXdWmqkVOM1tlABdvKGoBfuqJB96B",
+ "IOqM0tLFV7gt5hSYzb0ZX3s9dAzK7sRBSFz9sC8qztjJ+eYA2oodiEgoJCiULaF/SdmnYhZmpjnhozZK",
+ "w7Lrgref/tZz/N70GnqC54xDshQcNtFkbMbhR3wYPU4o33o+Rk2j79u28dCAvwVWc54h1Hhd/OJut09o",
+ "+6pJfSfkoe4y7YCD9fIBV4c778ndlFe94KR5HrkTdHkrbQagxlWePJOEKiVShsrWaabG9qC5a0SX5NJE",
+ "/+sqGvcAZ689buvyK0yJROcu5AWhJM0Zun4FV1qWqX7LKTqXgqVGopa8Fd3vbnzuX4n7NyPuRzfUW04x",
+ "Yq1yOUUjLWYQ8a98B+C9jqqcz0HplpEyA3jL3VuMk5IzjXMtzXFJ7HkpQGLo0MS+uaQbMjM0oQX5A6Qg",
+ "01I31XZMy1Ka5bm7iTPTEDF7y6kmOVClyY+Mn69xOH9b748sB30p5EWFhbh0nwMHxVQSj6763j7FwFe3",
+ "/IULgsU0evvY3t2Y8evcrQ36nurU8P/3/n8d/3qS/C9N/niUfPUfR+/eP/vw4GHnxycfvv76/2v+9PTD",
+ "1w/+6//GdsrDHksacpCfvnAm7ekLtFvqy5sO7LfmuF8ynkSJLAzDaNEWuY8Jso6AHjS9WnoBb7lec0NI",
+ "K5qzzPCWq5BDW8J0zqI9HS2qaWxEy4vl17qnNXANLkMiTKbFGq+sRXUDEuPpeXib6DLu8LzMSm630mvf",
+ "NvvEB4aJ2bhKwbTVWY4J5uctqI9qdH8++eLL0bjOq6uej8Yj9/RdhJJZto5lT2awjhl57oDgwbinSEE3",
+ "CnSceyDs0Rg4G5QRDruE5RSkWrDi9jmF0mwa53A+pt85i9b8lNtge3N+8G5y4648xOz24dYSIINCL2JV",
+ "GxqKGr5V7yZAK16kkGIFfEzYBCZtZ01m7EUXjZcDnWH1ALQ+xRBrqDoHltA8VQRYDxcyyCMSox9UeRy3",
+ "/jAeOeGvDm4OuYFjcLXnrC4i/d9akHvff3tOjhzDVPdsIq8dOki9jJjSLruoEUlkuJmtVWOVvLf8LX8B",
+ "M8aZeX78lmdU06MpVSxVR6UC+Q3NKU9hMhfk2CcsvaCavuUdTau3nFSQKkaKcpqzlFyEBklNnrZESHeE",
+ "t29/pflcvH37rhNU0TUf3FRR/mInSIwiLEqduAIHiYRLKmOXVqpKcMeRbQWTbbNaJVuU1rPpCyi48eM8",
+ "jxaFaie6dpdfFLlZfkCGyqVxmi0jSgvpdRGjoFhocH9fCScYJL30fpVSgSK/L2nxK+P6HUnelo8ePQXS",
+ "yPz83Yl8Q5ObAgZ7V3oTcdtOFVy4NSthrSVNCjqP3Y29ffurBlrg7qO+vEQfR54T/KyRceoj6nGoegEe",
+ "H/0bYOHYO3sOF3dmv/LFrOJLwEe4hfiOUTfqG/ur7leQg3rl7WrlsXZ2qdSLxJzt6KqUIXG/M1WNm7lR",
+ "snwYhWJztFZdOaApkHQB6YWr0wLLQm/Gjc99pI5TND3rYMpW8LEZZFhDAm8WpkDKIqNOFad8007mV6C1",
+ "jwd+AxewORd1CYp9svebyeSq76AipQbapSHW8Ni6Mdqb78LB0LAvCp+Tjcl5niyOK7rw3/QfZKvyHuAQ",
+ "x4iikezchwgqI4iwxN+Dgiss1Ix3LdKPLc9YGVMr+SLVfDzvJ+6V2nhykVvhatDrbp8vAcuBiUtFptTo",
+ "7cJVsrIJ0wEXKxWdQ4+GHF7uDExLblwI4SC75F5U0olZW6B15E0UZPtyYtYcpRQwTwypoDHTitfzM9n7",
+ "Q3czgQUqHcKmOapJVWCjZTpUNi7ZbMW9PtDiBAyS1wqHB6OJkVCzWVDli2xhLTJ/lgfpADdYAGBb2ZfT",
+ "INQsKDhWFXXxPLd9TjvWpSv+4iu++DIvoWk5oGSL0fAxuj22HYKjApRBDnO7cPuyJ5S6GEG9QQaOn2az",
+ "nHEgSSxqLXCDBmLGzQFGP35IiPXAk8EjxMg4ABvvxXFg8kqEZ5PP9wGSu2IK1I+NN+rB3xDP+7Jx3Ebl",
+ "EYVh4aznViv1HIC6UMdKfrUCbnEYwviYGDa3orlhc87iqwfpVB9BtbVVa8RFZjzoU2e3XIBYwbLXmqwo",
+ "uspqQp3JAx1X6LZAPBXrxCZ+RjXe6Xpq6D0a2o5pqLGDaeu83FNkKtYY7YOixYZS74ClHw4PRmDhr5lC",
+ "esXv+qS5BWbbtNu1qRgVKiQZ586ryKVPnRgydY8G00cu94PSLVcCoOXsqOsgO+N3p5HaVE+6wryWauO6",
+ "JJnPGood/74jFN2lHvx1vTBVsZXXbY0l6qdoBq0068wEKmSM6A2b6F7SdK+CFOSARkHSUKKSi9jNqbFt",
+ "ACXOmf8scF5gNRvKNw+CSCgJc6Y01E50HyfxMdyTFIvoCTHrX50u5Mys740QlZiy14j4YWOZt74CDCWe",
+ "Mal0gjcQ0SWYl75TaFR/Z16N60rNWCtbcpZlcd6A017AJslYXsbp1c37wwsz7auKJapyivyWcRuwMsUS",
+ "ydEIzC1T2yDdrQt+aRf8kh5svcNOg3nVTCwNuTTn+EzORYvzbmMHEQKMEUd313pRuoVBBpmzXe4Y6E3B",
+ "Hf9km/e1c5gyP/bOqB2fv9sno+xI0bUEDoOtq2B4TWTUEqaDCsPdlNaeM0CLgmXrli/UjtprMdO9HB6+",
+ "LlsLC7i7brAdGAj8nrGsGgmqWYKvVvBtrehGBZzJIMycNwvlhQwhnIop3+mgi6gq624Xrs6B5j/A5hfz",
+ "Li5n9GE8up7rNIZrN+IOXL+utjeKZ7yat660xk3IniinRSHFiuaJczD3kaYUK0ea+Lr3R98yq4u7Mc+/",
+ "PXn52oH/YTxKc6AyqVSF3lXhe8Vnsypb7a/ngPhK6sbm8zq7VSWDza9KlIVO6csFuJLUgTbaqZ1ZXzgE",
+ "R9E5qWfxCKGdLmd3N2KXuOWOBIrqiqR239kbkuatCF1Rlnu/mYe2J5oHFzesAGuUK4QDXPt2JbgkSw7K",
+ "bjqnO346aurawZPCubYUzV7auvCKCN6+QseY503hbt2XFCtfWq9IlznxcomehETlLI37WPlUGeLg9u7M",
+ "vEzw5R5l1IxYsp6rWF6yYCzz2pDaNi0ggzmiyFTR8jo17qbC9fwpOft3CYRlwLV5JPFUtg4qlklx3vau",
+ "ODW6Q3cuN7D10NfDX0fHCKu+tiUeArFdwQhv6jrgvqhMZr/QyiNlfgiuJPa48A9n7IjELZf1jj4cNdvg",
+ "xUXzxi1s0dPlf4YwbK323f2BvPHqys/2zBHt98NUMpPiD4jbeWgeRxKWfJ1bhlEuf0CY6BB2uWiwmMq7",
+ "U7ctqmfv3e4+7Sb0QjWDFHqoHnc+uJbDgpveQ0253WqbSNKIdYsTTBhVemTHrwnGwdyJxM3p5ZTGqpEa",
+ "JcPAdFJfADd86VoQ/7HHvaqyLezsJLhLrt5lNhm9AFnnEnYL21xRYbDTDlYVas0AqTbUCcb2/i9XIjJM",
+ "yS8pt11czHf2KLmvFVjnl/nqUkgsJaHibv8MUrakeVxzyNKuizdjc2YblJQKgg4YbiDb/MlSkesiUuUQ",
+ "OdSczsijcdCGx+1GxlZMsWkO+MZj+8aUKuTklSOq+sQsD7heKHz9yYDXFyXPJGR6oSxilSCVUofmTXV5",
+ "NQV9CcDJI3zv8VfkPl7bKbaCBwaLTj6Pjh9/hU5X+8ejmABwDWa2cZMM2ck/HDuJ0zHeW9oxDON2o06i",
+ "Wfe2w1w/49pymuynQ84Svul43e6ztKScziEeKbLcAZP9FncTHWktvPDMtkdSWooNYTo+P2hq+FNP9Llh",
+ "fxYMkorlkumlu9xRYmnoqW5vYSf1w9leS64ysYfLP8Q70sJfEbWMyNt1mlr5Fls13mS/oktoonVMqK0f",
+ "krM6esHXSyenvjwRlmquKjRb3Ji5zNJRzcFghhkpJOMaDYtSz5K/kXRBJU0N+5v0gZtMv3wWKU/dLJPK",
+ "9wP81vEuQYFcxVEve8je6xDuW3KfC54sDUfJHtTZHsGp7L3MjV/b9d0dbh96qFJmRkl6ya1skBsNOPW1",
+ "CI9vGfCapFitZy963Htlt06ZpYyTBy3NDv385qXTMpZCxmoO1sfdaRwStGSwwti9+CaZMa+5FzIftAvX",
+ "gf7j3jx4lTNQy/xZjhoCq+Uv3i3bG7NvVPhffnTtFDu6d0+cgQ0kqL655VyEaEiS1dAwjI/gqsnvj38n",
+ "EmauQeLDhwj0w4djp8z9/qT52DKphw/jlXiiPg3za42FvVhhu1KB+Ta2h9+IiIfBl72vbkNcvkHEw9PH",
+ "as0Dc5SnbqgxaZYYv31ZeJhItvhtZfwUvH37Kz7xeMA/2oj4yEceN7COx7Ar6SGUoMVClGSy6nkQJ0HJ",
+ "N2I9lHBanNQTzyeAoihKSpZnv9TZuy3WJilPF9F7z6n58Le61161OHt4oyUgF5RzyKPDWZvhN29bRKyf",
+ "f4mh8ywZH/huu6mGXW5rcTXgTTA9UH5Cg16mczNBiNVmYmQVeJ/PRUZwnrreYH1cu81YgpL5/y5B6ZjA",
+ "wgc2+A/924Yd2IrtBHiGXoUJ+d62014AaRSTQmveV/toZr6XRS5oNsYqJOffnrwkdlb7je0YZSvGz9GY",
+ "ba6i5dcMSqkOCyP3zZ/iKS7Dx9kec29WrXRSFXiPJRGbN+oS9Kx114NmboidCXkRNMa1+cZmCIJFaOTS",
+ "WObVaFbHRZow/9Gapgs03RustZ/kh7c68FSpgvaiVZuwqr4onjsDt+t2YJsdjInQC5CXTNkuyrCCZt5y",
+ "lcTvXEc+j7m5PFlybillsoeUq6qJ7ot2D5wVkf46KApZC/F7Gm62U8i+nR/O8KtoubN2G4lOX1GbBVu1",
+ "f/Ld8VPKBWcpFhuLiWjXbnnIXemAumxtZ7w/4u6ERg5XtHlFFU7psNjbzsIzQoe47mVN8NRsqqUO+6fG",
+ "vr4LqskctHKcDbKx78Hi/MWMK3D1YrE5d8AnhWzcPyOHjIY0JNXV155khOlTPQ6A78yzV849hHkFF4yj",
+ "IejQ5hQ/69HFbrDaWI9Mk7kA5dbTzCFXv5pvJphOncH63cR3j8Ux7PWtWbaNVegOdeIjF1ykgHn3uXnX",
+ "Fbmqfm5EqttJT4rCTdrfoSeqD+g170Vw5AY68VeAAXKr8cPRtpDb1pAjlKeG0GCFAQtQoBzuEEbVrabV",
+ "Cc0orZai8A1iQ/2ilS4Yj4DxknGoextHBEQaFQm4MXhee75TqaTaqoCDeNo50Nwa1BGGprS7orruUO0S",
+ "XwYluEY/R/821o12ehhH9UKtuFG+qVoqG+oOlInn2MvdIbLbNge1KqdEZZh50mqkE2MchnH7Vl1NAdBj",
+ "5zd0Ivs51rvbVxL1JRNPy2wOOqFZFivf+w0+JfiUZCVqDrCGtKzKvBYFSbF2TrOYUJfa3ESp4KpcbpnL",
+ "v3DN6YLOVBFqCLtj+R3GZKXpBv+N1Tjt3xkXrLN3uKiPzMn2q6DVDX+Nab2GphPF5slwTKBMuT466qmv",
+ "Ruj19wel9FzMm4B8DLddD5cL9yjG3741giOssNEp3GtFS1UAA4Mzhe8nimZjlbrd5EooyjqVfPFSsOpX",
+ "uN0B0d95cIzCrydEO3TCWvlqHZN9gdppb14B1S7DUVOylQX1Zo3ZKK+WW7frYe+L7LKBXYdzh7q1bkWo",
+ "DxnsAvSDj0cmBWUuhKJmFl3MusyFbi7JkJjmeoPbi3D5AL0eux9WfbH7vqAePm93JrsAV/agkLBiovTB",
+ "CT56zZuE9tdGn68qeyK6/q7jFaf6uO7QXuftuesQYZfpbPIffrGxjgS4lptPwJXb2fROz7OutmvdU/Ur",
+ "pCouPqjYeEMqDik2Gatr6HTDRte1HT3jOmT1Yog60O0BNx6dZnsJzFhtzJEdJXbs4h3d+kuH1eXC8IgV",
+ "QrG6xn+s1dvAMNFz7NYWlD7rjuVjtFaQamzsUMeeSIB9CqGZyYLmsXclxHrM6Sqa1lUO21YurNvNYYeM",
+ "72T0BVmpthL+ZHhxrJMqwhD5NFa0ngN3/VubuTqDMwZmM0g1W+3IoPzHAniQnTf2fhnbhz1IqGRVBDoW",
+ "4Nnf61gDtC3BcSs8QSHMa4PTlz91AZt7ijSoIVqaf+xF7VVqryAGkDskhkSEikXwWEeyC6pgqqIMxIKP",
+ "mLOfQ13FrrerV5APfMW5PEkawVHnCG+ZMt5WaNBc5tO9MucxmLovybLblaTf/niBTWBU1XHT124JrXRy",
+ "2q1weelqv2C+a3V34qvAgPK/+eR2O0vOLiDsO4Y3VZdUZv6NqOvFe3WSLfKokxnpO2q0gZ5VM7M6vrmb",
+ "CxepmYZR7GkujBqR9KUCNEOKq3ice8oGTtkS/hgsbeCagXT9GVH/zYWCRAsfD70Njm2osNFhV0KC6q1T",
+ "aoHrrR70pi6PhPWaKVYLoi4oLFwgkbCkBjoZFDHqn3Mbsp/b5z75y9fr3elhquh1d+MIH9nOVAeJIdXP",
+ "iJOWu5PKruJsYpzbHuAqVtGIg2zehhRSZGVqBXR4MCqH3OB6YVtYSdRPk3ZX2bIRgszcC9gcWSPId9zw",
+ "OxgCbTUnC3pQCaO1yQd1v6kY3PODgPcxPVfjUSFEnvRcdpx2yzC1Kf6CpReQESMpfARoTxckch997NVt",
+ "9uVi48sOFQVwyB5MCDnhNubeX2w364C3Juf39Lb51zhrVtrKaM6pNnnL48HLWLNMXpOb+WG28zAFhtVd",
+ "cyo7yI4iP+ueElCSXkZ6gk2GWuXdq+Z2n6aaqCwUMZ3kzN5YPceDHnMcXUqmwQU2WCFuNpK4my6ichEL",
+ "EoTLYfn7VUCp2ZFc9AjucDIESAMfkudZQeEGjyKg6sG0I1CoihGq29fUcUJd9SjPxWWCxyipitjFjC7z",
+ "XlNK+LK99WeG3KYQBBxR5TSIDVnQjKRCSkjDL+J5OhaopZCQ5ALjj2JXozNtFMIlBudzkos5EYWx820t",
+ "SH+JFO2tFMx1qD5SNufcQpDYG6+eqh6gXI65A9e+3IV3Syun/dtEnS8ijivcML9be/eCcgS3dwuXAMwB",
+ "hL7baXcSa3XVXFe76VpfC0QtliyNo/vzCtfpDbKJUW8MFa6Kss3ixNfwgIc8pbqdxdPTRTNwOs2jvNod",
+ "P3dLhXRu/osivD0umYFjLj38LNKz2bLhJO0VFi0AEFKbWqRLaUsvh6y8augm5jYVEe/Y2oAOZDgYynA9",
+ "2MwIhwTqw3ZCiXV8ixyEandcQzqfS91zqKJBEttjEmwX0OnQyISq0vxA/hkA0B+r0IBhUMTCvmDMsKtu",
+ "QiNIPq3sxHGj6TlrCQlfBdQyw5RaP9ECiBm7lOBye237z1a/sYLqhdcbzetdbw7PYA0KE29t0ySqrO/R",
+ "+0Bd79G2Qi6KJIcVNEI4XMJxmaagFFtB2LfUfkwygAJvBNp2aiw2IRSHLePFrT0JbreHYDdqzVjE2p0i",
+ "O0yVqGG15ok9JmroUTIQrVhW0gb+1DU6OPY1b4zIaw/ru2GcYm8mEV/cNhaxM5oIaT56Lnk8mCjMd6/c",
+ "kDhbVl1XWCKsT7Yq6CXvN9u7RFmrm8N7nwaI/XYNKYruZrTM9XFCcDCiWrUsevVMWe3wVd0/vVS2jcg6",
+ "nWDjdhj4Tt5h2SlvK7hvI6LROqqZigzAVM0bMPYW6tjO4LUl3ZCMzWYg7VWc0pRnVGbh64yTFKSmjJNL",
+ "ulFXt8kMtLKE8U6zzHBqHNQzq5iBhl5lC0i+cQZ/n8k0wNTBe9eImWPFthZ9TWo7uxJPBqJrYxpiVGQP",
+ "EbhSFGgY2sMqOGrlZEkvYM95FPsDtk+DBaKc514LnHXIFB+20vpPiDo88D9zprdSu9X32mGq9h7REqOn",
+ "QT6vgxns5nRpMBZZfG5bpYXRxe3OI36vrVPTzgc9lVSbanrPLqJbx4Wlhzq5Gm6uNjxHsfhly8MT5O1q",
+ "S7gCqKBXW+rczV21pCMULFLGLvp7T63Fmgs0y1hfa/wFuHLl7mw1p61cgGac4Z7uwN8Vh6gQRZIOucPK",
+ "IAfDaqzV4iBtwjjAR1akO8RCVEj2cKWmiSRmyB/wWFjVAKN9KoE4bsehNZWA6uBh3+W0lKjGXtLN7pKY",
+ "tSIQD+G3I3sb3EcmVVC7DbZHXNlWPtGKk/soiBGuE+tm0631d/jF2NyU+vb85pbj7sfiCzjhzlDCHoXb",
+ "6K02pTypRGiN8k2MafgboCsssE8/HBBdfbCtqk7LTWxQVEherQT0INC6kbYRbAY927cHP4UV4uuyBdIG",
+ "bGOwhLdI2/zix9pSHdY93n+wA7wwJi7oH++vJx04Hzn//8cKKcFS3vVRQmP5u8Ls3AJr0z7YIqctaw22",
+ "X4fNGW3uSxBDqZ5XoYk9orkTwYjl4I16lueRyEerwNvm4gHhGLkoVzS//ehF7BNwgviA7E1/vEMY/hYi",
+ "2aJSXS359iUdNHcQ6na4qflrjLb8B5g9iooFN5TzGXSYP5pfNLdXUzPfaXgFnFzimNZj+/hLMnUFpgoJ",
+ "KVNtX8SlbwJYRXthT1yX8LzWO8LLdq3zF6GvQcYz79ojr+qGYnj7Muc1hPUR/chMpefkRqk8Rn0dsojg",
+ "L8ajwkrPO8TFRSOHo9bqAokmJBw4lyPIytwzl6Nbw3ro8my+ghE6pYLuOgdL6wZuI4K6XtvQRKTB1aCw",
+ "29OQ/KF45SbzOSYwHaSE014FnG4gdcniyI3h5o1RzC99xSxswYaeuimt/ShZnu0ijEYVnA9Vj3ys8/Kb",
+ "q5d2u7LUQ2DDqbtH1bWsvkYOiEVMZK2NyYOpgvo2A0rbuM8ihWwwVCktJdMbLOPuLV72WzTJ6vsqYN8l",
+ "fFROVCf7tLiAqhFAHd5fKi9dvxc0R3lkfbvcSCGRT8i3a7oscucTIV/fm/4nPP3bs+zR08f/Of3boy8e",
+ "pfDsi68ePaJfPaOPv3r6GJ787Ytnj+Dx7Muvpk+yJ8+eTJ89efblF1+lT589nj778qv/vGf4kAHZAjry",
+ "RUNH/5Oc5HORnLw+Tc4NsDVOaMF+gI1tX27I2DdGpymeRFhSlo+O/U//jz9hk1Qs6+H9ryNXk3C00LpQ",
+ "x0dHl5eXk/CToznG8yZalOniyM/T6Zx+8vq0uje31y64o1XElI3FcaRwgs/efHt2Tk5en05qghkdjx5N",
+ "Hk0em/FFAZwWbHQ8eoo/4elZ4L4fOWIbHb//MB4dLYDmmP5i/liCliz1jyTQbOP+ry7pfA5y4rrFm59W",
+ "T468WnH03sU1fzAzRJ3OtgpSUPqm20Td5Uig58ZeqTeakirXI3Nctap1t3s8w+I0NlTYsLkKcadZ3ZPt",
+ "tGZavjK9bdVz/Gsk18xHVfiC6Y1G9i4Cgyny32c/vSJCEmfevKbpRRVRQk5ntsqwFCuGNU+yoFCO+XLi",
+ "6fffJchNTV+O84VtaHznUReaslTzoll2odaqYk6SWMN6nNmQRUDYVRZCzbjwHiOApGbDhrU+Sr569/6L",
+ "v30YDQAEU2IUYJHi32me/04uGfY9xws9X+bflXEeR7psojY9rqPa8YN6J8fowKmeho3Uq3ea1Yp+54LD",
+ "733b4ACL7gPNc/Oi4BDbg3dYRheJBc/ck0ePPKNxanwA3ZE7U0ObDvkCXTYsohrFk8QVBuoyJPvoTZW4",
+ "Lmlhz6J7YkMZnWPVvjQxfOfZARfaTK+/9nLbw3UW/Q3NsLk1KG2X8vizXcopx6w0IyCIFYAfxqMvPuO9",
+ "OeWG59Cc4JtBjfquoPmZX3Bxyf2bRvkpl0sqN6jaBB34W8X/6FzhbQaySHu2Gz23R+8+9Eq9o7Cl8NH7",
+ "RmJTdi2Z2Ommfvpih5i8p/o4Z7fDU6tjsXleNaTFSzHXlhlb5KoHE/J9+DVybyyYbMsRl5JD5vOSvNSr",
+ "OkD4vhI1bPdUWEs6KrQDd/Gd/P7Y8vuk6exodBGKAdM4BVth6twwXleAdmOTggSmPUpXBr0PfXcV2zv4",
+ "Ch0Yb7QxfsvWtDO9i5mCOxn1He56cNenJgXwVhpTs+fzzbNmXwejkiQNkXGDjPszV/p+pLmhk2C5rXqT",
+ "trXWnTL4l1EGq3z5udXOXDfJ66mH2Ff+6L1vl3YAldC1ixugDIZmdfBtEBp5v8VOHkxs77PwnavxDJcg",
+ "v1PNwyZ2dwreJ6DgdRtExsCo2/59PKUOYVjUHSR3Nqv0vR9DbcR35hzc6fIz1eL+wsjqVdsMpLsVtiuw",
+ "z44y5pj1jbHVP6US5pB2p379pdWvqmzNtRSwRotXVwgpuMa6lveu7Z1jutLEmqWLAs6GOUyGobgjPK6D",
+ "gw2LsdG1Lq5Wjb1liNep1mi0mzXu2I1dFet7CA3UbzanL3ZpV5+Rn2dwB5KIFIjvzU3z0ui1w5vbuXYY",
+ "xpuePXp2exCEu/BKaPIdSvEb5pA3ytLiZLUvC9vGkY6mtv/aNq7EW2wJGUXdVy3gUVXNtnHw3LxtozTu",
+ "YzJYs2btgwnx3d5U1X/YZVLPhWFUPgGDyrn9yPA6gwxyz/95jOPfm5DvMFVHqzEGm2nXnJbcY1wfP37y",
+ "9Jl7RdJLG8vVfm/65bPjk6+/dq/V/RmtndN5XWl5vIA8F+4DJyO645oHx//zz/+dTCb3drJVsf5m88o2",
+ "ufhUeOs4VsCgIoC+3frMNylmrft2dbtQdyvX99+IdVQKiPWdFPpoUshg/08hfaZNMnKGaOXJbJTBPKA0",
+ "ssdkH3k09n3sDN+phMmEvBKuInGZU0mEzEC6hu3zkkrKNUA28ZSKVR6UrcCa5gyzXCXBFtQyUSyDuj5O",
+ "lWNeSFhhjHxVs6UJwW5Gj5G0nyyT/5GugwzPaSWmtXBLRrfnkq59E3xs8ywk/vT11+TRuLZe8twMkFSI",
+ "iTHXJV2PbtHrVxHboPjzZv/RnQG6OPYQD1Kt/VRFK8Jmh39tzv3Zau6W3N3GHohz7n3xU1/shH4EV/d3",
+ "qwfBKna2RT72bN/U5W+MludVqDiLMzMMdQ58wncEO13TUSO0jd67Q3znBLgWK2kT1J5sA7NO1dF7tMtD",
+ "ntE5t5g199e6Lg3ujqRY+ssjQWag04VL2G2hPsKefPfTft60ZJwtDZSPxjeu1eAudktDhW1XMmrT5IdU",
+ "9g1yKfECD2SEiH/yjcjMYzazFd18qcxz160Cr6Zcya2q14E1vm33ExfP7/N6C9ro3bAbyuf15F2FDNFy",
+ "iPvPOwTvh+AOc/zWd9dHjLlF/Bki/r0pmZBXok4bd41d/4xXjzcp2W96Qa8EB3vHbjRfS4t316mV2mEY",
+ "h0WKrxdi7Zeqxd6VVZCjBVWLnXrI381LO3SRIdLbTPZZivC/OyxtkTJmbZOdxRDq0YYwZ/OiLRXZbPr2",
+ "Ea2Yj8JPP0HT5mNwrNthMXhIPZ9xagE/LNPBEjyWmI+qfl99HCjeQnEwN9KiCkOLdj2cQi74XH2arGhr",
+ "M8soXiJUUjWXjHeQ/Oud3edY3ceYvDYC0tV7UoynQJRYgm3hzBRZMqVcsOSzR3+7PQg1W/qmOTzMXf3I",
+ "3OWLR09vb/ozkCuWAjmHZSEklSzfkJ85XVGWY5ODa3A77I9Z1V/z3uBoS1S8bWrWBUvDIkZXZ4KN0LX3",
+ "es2yD7uZYVB3cE8+yHjAB8MawLQogMqrM8DdV1ftRiinL8Lo4EbbxqqiVgQUg6I9A+T/YzTQ74Rp72Lm",
+ "hF/JLaC++pdjEy50V8zGVXCM0QLE7Ji85Q+JWtAvHj/57ckXX/o/n3zxZY/nzMzjivZ0fWf1QOaxHWaI",
+ "A+2zdgceVmuv8Ht827u93yaORyxbRxu71a2aO10mnFp2T5GCbnq7PxY7Wk2Hw9Ztp2+/2KHSbLqI2lfe",
+ "/Kka/pzybyor2Fbkcx2a71pM9yRPBHzGEFrda7rC+va201u0yRZZVv19b9s4rZMMrKDzyJMtmfNRFV39",
+ "sYzUBG1U4F6xaaLl4+mU2HxwHFx3F1JokYrcxq6URSGkrk63mgxS96Dv2q6h7fUR7l7KXEp1uiiLo/f4",
+ "H6zw9aFOPMDax+pIr/kRtlg4er81RABBzM1Zl7ZsckMvjfYw6prJ+Hldovk7ITt93HaFALROzLh9iGy7",
+ "CIwliOhnN6Od/aWVmq32f2vDr+/SjozYOcBVXl1QoL+i3aDwt0+Vsy0vIiR8dwXzaS2odorMGM8IDbax",
+ "ZbsJWTOCG3aM3PSiP4af5fbvnb74jM/ZK6HJ6bKwHeogu170DmlzOC89torb/RQDJ/q7IT5dmR9KfB+Y",
+ "WHnXdwr4PS7kglRs8NNRibnRRlbfjO/7TpJ/2pL8uS853CDDO7n8+chl6cMp70Twpy+Cn362q7nBi5iB",
+ "ItlLoiuL4doS31MgR/q3o8ugdRW+7Z4GTe/2KtV3Qvr2FndS/DO9ZLA7OThpaYiHZlcqk5vyEKGznxT0",
+ "w/wMeR7xNPQd1LHt9aMXwLDojEgZ1g8/zdTYHmLnnHCn+E7x+aQVn2Cv7/SeO9fDZ+Z66NFynNXf7Ire",
+ "p2jsqwCtliIDH3UiZjNX5K1P+2n2njHkqTRdFsR+GdVy8Db2nC3hzLz5k53ioCK2BrulFrXAM8hSkAqe",
+ "qQG3om7Uq8ohvMbtB+DWb0CrHfCwuPTvyZVJ9k1QQ6ZDCaSNfIU9g3yxO4eMDFZk6RsNX5Nsj97bf9Gd",
+ "VggVWc2ZJ+DOxtx322Kr99lxGwCS16iEumbE7isxI49sEb+SY6ZO3RyQ8oxouTGKqq9ZIoHmJG1E6Fdw",
+ "dE/OWe/J2WkKdFbXs6a4LSDqE3rIcNZWdtQPt34AnlPuSL6LIC0IJRzmVLMV+Lj1yV1G/ZWlmctn38IA",
+ "x4RmmT2N9SbACuSGqHKqjK7Dm4GW91TzvOzBMGBdgGRGRNO8voC3ZsKRTZffFlB5Zt+4ptBq8SKbpC+b",
+ "UUBesroUfjEjP7JUipN8LpSP61IbpWHZab3nPv2tp+iqdyR0Y8AEzxmHZCl4rCHcT/j0R3wY+xpLDvR9",
+ "fG4e9n3bkrdN+FtgNecZIpOvi99P5PRfK1ejtVoJhZDGup3aJrWW/vc8Sv7QbHjaPUkbngaXWu5hMFDY",
+ "Pq7x89H7xp+uWIZ7Uy1KnYnL4Fu07G3Qz5A8+aBR9RU8aa2Gz+pmfWk3eYcU4CF2YqqnkdZfQTvy3u5f",
+ "f9H8EHflEhKJa9G/Aqla5tldksifKklk8L7vxWNtq8tdHK1Uh9VIXokM7LjNTrOx+sxcZOA6cnYVkSrY",
+ "MR5Y76VS/V4r1Dml5XyhSVkQLWJB1fWHCU0tk02seROfMKiIZo0gnG5BV0Bojn1OyRSAEzE1i67lIy6S",
+ "KqxJ5yOzXUhnVBUK4CqkSEEpyBJfj3oXaFWfU4zj1lvwhIAjwNUsRAkyo/LawF6sdsJZ9QlX5P4PvxiD",
+ "+dbhtargdsTaSlgR9FbVNpy214V62PTbCK49eUh2VALxqgEmkohlkYNLJYmgcC+c9O5fG6LOLl4fLZhr",
+ "wW6Y4v0k1yOgCtQbpvfrQlsWiZHfXRCf26fnbImaGKdceL9ibLCcKp3sYsvmpXAtyqwg4IQxTowD9xic",
+ "L6nSb1xWYYYVaKw4wXmsjm2m6Ad41deP3oz8S9WNvjN2auQhV6WqWta7TAHIYmvgsN4y1ytYV3NhWqcf",
+ "u0pFsB6+XSP3YSkY3yErKMpNqA5u881wkcWh/5E6B0UXlQ0gakRsA+TMvxVgN7zG7wGEqRrRlnCwyGhI",
+ "OVMhcqDcZnSJojDcQiclr77rQ9OZfftE/1y/2yUuqmu5nQlQYZqIg/zSYlahg3ZBFXFwkCW9cJkkc9dk",
+ "qQuzOYwJZoAn2ygfXbbmrfAI7DykZTGXNIMkg5xGXCk/28fEPt42AO64J89kJTQkU5gJCfFNrylZ9rqI",
+ "qqEFjqdiyiPBJyQ1R9AYzzWBuK93jJwBjh1jTo6O7lVD4VzRLfLj4bLtVve4pcwYZscdPSDIjqMPAbgH",
+ "D9XQV0cFfpzU7oP2FP8E5Sao9Ij9J9mA6ltCPf5eC2i780IB1pAULfbe4sBRttnLxnbwkb4jG3MgfpbO",
+ "/nbs0g1Wf2k6UAMDcHIV4/bokjKdzIS0inRCZxrkzoD4f1Dmr8Pd1YAWrjYBwRGc3HTjIJMPW104LmJB",
+ "IE5cGBLp3r+Zqb4TclCJzWYhGco0KblmeVBmvDKVPz2H4Z0T4M4JcOcEuHMC3DkB7pwAd06AOyfAnRPg",
+ "zglw5wS4cwL8dZ0AH6tobuI1Dl9KjAuetKMSyV1U4p+qyGQlq7xTAt0Yl5Rp1zXT5/u7J9ersauB5ogD",
+ "lkN/nLQN3zz/9uQlUaKUKZDUQMg4KXJqbANY66qHW7M7qO9bbBtB2sajVMHTJ+Ts7ye+Ft7C1Wxrvnv/",
+ "xPX/VnqTwwPXJQF4ZlVR3y4BuEG665ZAvUzwvd5c5zuWY4y5It/i2y9gBbkoQNoyW0TLMuLyOQeaP3e4",
+ "2eHx+YeZ3AWt/m5G+33ccDQ5tC1p4fV8v1aqCLW5i+RFkM34+4zmCn7vS2i04y1pEWu3Vkk+6wtCbvKN",
+ "yDatE2J27Qg3sHk26op4jFO5idRb6iYTtElDC8OvHGF1nVkfDl63sUu0XTLbRWExdV2Cip7jbVQeLVhY",
+ "bVhnKJvyOmvRySiWrdmu0jeqABwSAnuOCQd2T8gb+93HrQqPELkjVjPzTyZysPlmxTTwXWNFONbzuUbl",
+ "e8RHTy+e/bEh7KxMgTCtiC/9uFu8jEfrxIw0B544BpRMRbZJGuxr1JBCGVNUKVhOd0uikH+6BsNO+Jgn",
+ "2+XUxxEjL4LFbePJIdGsE8eAe7jzRsNg3lxhC0d07DnA+E2z6D42GoJAHH+KeZVavG9fpldPs7ljfHeM",
+ "LziNLY2AcVcqt81EJjfI+ORGlryf5327hrQ0wIUn+T665/FODta6cbGZwbScz7FRcueSziwNcDwm+Edi",
+ "hXa5Q7ngfhRkB6+aZ1433bs9XJe7BBnY932Nwwe4HZRv8DZjWVC+8Xe+kCi2LHOLQ9tj7rCM1laz7UYC",
+ "4H2sc/71ubVfe59f4Lx1orb5u0ULuaSK2P2FjJQ8c7lDnZrXaz68Yogd+nzNaza9tTqIXW9kdW7eISLC",
+ "73IzaVuRAmSi19weqGYndVtb257cyV2D2L+G2LAp39DDYLt1omuGcCDpIQO+huIj6AZSJ8M1eoSg16I/",
+ "dSRsDWLfPGj0SGf4ZhBJ7VJxl6SQF4T67v2p4ErLMtVvOcVLmmBhk26AifdG9/O35/6V+D1h5BrPDfWW",
+ "U2zuXl3dRPncDCL3FN8BeDaqyvkclOGVIZHMAN5y9xbjpOTG0hIzsmSpFIlNRDVnyOgnE/vmkm7IDOt/",
+ "CPIHSEGmRrIHu24dxkqzPHcRLWYaImZvOdUkB6o0+ZEZLmuG88UHqlAu0JdCXlRYiHeKmAMHxVQSd758",
+ "b59iMwa3fO/kQ4elfVwXUb/dLgwedpb1Qn76wsBNsXZxzpSugyA6sN/aBfiS8SRKZOcLIC4mrE1b5D5W",
+ "THME9KB5O6QX8JYbCacFQa5O9dXIoX3N0zmL9nS0qKaxEa3bIL/WQSbeQbgMiTCZu6uVP1FqZkAH/voS",
+ "N95Wo2/t/Z7XKA2RCzwzT3sEsn3qmnf1vOSMhIYjrFUOxr1x3gD5z9v4/d3N2IsejQezGLsDdtlVsz0T",
+ "4s1v+JjQXPC5rUJoLEiB+8R4UWoMrL5JJx2saJ6IFUjJMlADV8oE/3ZF85+qzz6MR7CGNNGSppBYr8FQ",
+ "rJ2bbyyd7hKkQZO65RIyRjXkG1JISCGz9baYIrWxPbEVC0i6oHyOMleKcr6wr9lxLkFC1c/L2LftIeL1",
+ "TtY8sbXXujCeEOuoDMvTAk0Xkf4oKJmMQe0pwZaTGGIyR1gBVtbss6DHo14N2SB1VQe2WeQ0+cMA8d8Q",
+ "5AF+6okPUYr0jlrvqPWjUWus5B+ibtbyAVh8hdtyw86imy5weYu+p49S/fauhPyfvYS850CKUCJpQ+uP",
+ "9y6jijBNLrHAzxSIETwl+rxdi3NnIU+IYUiBf99WglSu82a6oIy76jBVugDCoV13YO3bEd6Iu9AyM/QT",
+ "GnRAWkqmN2gn0IL9dgHm/++Moq1ArrwJUcp8dDxaaF0cHx3lIqX5Qih9NPowDp+p1sN3FfzvvfZfSLYy",
+ "Fs2Hdx/+/wAAAP//3P3na2WCAQA=",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/participating/private/routes.go b/daemon/algod/api/server/v2/generated/participating/private/routes.go
index 1e585cd01..77ebb098c 100644
--- a/daemon/algod/api/server/v2/generated/participating/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/participating/private/routes.go
@@ -158,183 +158,192 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9a3PcNrLoX0HNOVV+3KFGfiS7VlXqXNlOsrpxHJelZO9Z2zfBkD0zWJEAFwBHM/HV",
- "fz+FBkCCJDjDkRR7U+VPtoZ4NBqNRr/Q/XGSiqIUHLhWk5OPk5JKWoAGiX/RNBUV1wnLzF8ZqFSyUjPB",
- "Jyf+G1FaMr6cTCfM/FpSvZpMJ5wW0LQx/acTCf+qmIRscqJlBdOJSldQUDOw3pamdT3SJlmKxA1xaoc4",
- "ezm53vGBZpkEpfpQ/sTzLWE8zasMiJaUK5qaT4pcMb0iesUUcZ0J40RwIGJB9KrVmCwY5Jk68ov8VwVy",
- "G6zSTT68pOsGxESKHPpwvhDFnHHwUEENVL0hRAuSwQIbragmZgYDq2+oBVFAZboiCyH3gGqBCOEFXhWT",
- "k3cTBTwDibuVAlvjfxcS4HdINJVL0JMP09jiFhpkolkRWdqZw74EVeVaEWyLa1yyNXBieh2RHyulyRwI",
- "5eTtdy/IkydPnpmFFFRryByRDa6qmT1ck+0+OZlkVIP/3Kc1mi+FpDxL6vZvv3uB85+7BY5tRZWC+GE5",
- "NV/I2cuhBfiOERJiXMMS96FF/aZH5FA0P89hISSM3BPb+E43JZz/s+5KSnW6KgXjOrIvBL8S+znKw4Lu",
- "u3hYDUCrfWkwJc2g746TZx8+Ppo+Or7+j3enyT/cn189uR65/Bf1uHswEG2YVlICT7fJUgLF07KivI+P",
- "t44e1EpUeUZWdI2bTwtk9a4vMX0t61zTvDJ0wlIpTvOlUIQ6MspgQatcEz8xqXhu2JQZzVE7YYqUUqxZ",
- "BtnUcN+rFUtXJKXKDoHtyBXLc0ODlYJsiNbiq9txmK5DlBi4boQPXNC/LzKade3BBGyQGyRpLhQkWuy5",
- "nvyNQ3lGwguluavUYZcVuVgBwcnNB3vZIu64oek83xKN+5oRqggl/mqaErYgW1GRK9ycnF1if7cag7WC",
- "GKTh5rTuUXN4h9DXQ0YEeXMhcqAckefPXR9lfMGWlQRFrlagV+7Ok6BKwRUQMf8npNps+/85/+k1EZL8",
- "CErRJbyh6SUBnooMsiNytiBc6IA0HC0hDk3PoXU4uGKX/D+VMDRRqGVJ08v4jZ6zgkVW9SPdsKIqCK+K",
- "OUizpf4K0YJI0JXkQwDZEfeQYkE3/UkvZMVT3P9m2pYsZ6iNqTKnW0RYQTffHE8dOIrQPCcl8IzxJdEb",
- "PijHmbn3g5dIUfFshJijzZ4GF6sqIWULBhmpR9kBiZtmHzyMHwZPI3wF4PhBBsGpZ9kDDodNhGbM6TZf",
- "SEmXEJDMEfnZMTf8qsUl8JrQyXyLn0oJayYqVXcagBGn3i2Bc6EhKSUsWITGzh06DIOxbRwHLpwMlAqu",
- "KeOQGeaMQAsNllkNwhRMuFvf6d/ic6rg66dDd3zzdeTuL0R313fu+KjdxkaJPZKRq9N8dQc2Llm1+o/Q",
- "D8O5FVsm9ufeRrLlhbltFizHm+ifZv88GiqFTKCFCH83KbbkVFcSTt7zh+YvkpBzTXlGZWZ+KexPP1a5",
- "ZudsaX7K7U+vxJKl52w5gMwa1qjChd0K+48ZL86O9SaqV7wS4rIqwwWlLcV1viVnL4c22Y55KGGe1tpu",
- "qHhcbLwycmgPvak3cgDIQdyV1DS8hK0EAy1NF/jPZoH0RBfyd/NPWeamty4XMdQaOnZXMpoPnFnhtCxz",
- "llKDxLfus/lqmABYRYI2LWZ4oZ58DEAspShBamYHpWWZ5CKleaI01TjSf0pYTE4m/zFr7C8z213Ngslf",
- "mV7n2MmIrFYMSmhZHjDGGyP6qB3MwjBo/IRswrI9FJoYt5toSIkZFpzDmnJ91KgsLX5QH+B3bqYG31ba",
- "sfjuqGCDCCe24RyUlYBtw3uKBKgniFaCaEWBdJmLef3D/dOybDCI30/L0uIDpUdgKJjBhimtHuDyaXOS",
- "wnnOXh6R78OxURQXPN+ay8GKGuZuWLhby91itW3JraEZ8Z4iuJ1CHpmt8WgwYv5dUByqFSuRG6lnL62Y",
- "xn9zbUMyM7+P6vznILEQt8PEhYqWw5zVcfCXQLm536GcPuE4c88ROe32vRnZmFHiBHMjWtm5n3bcHXis",
- "UXglaWkBdF/sXco4Kmm2kYX1ltx0JKOLwhyc4YDWEKobn7W95yEKCZJCB4bnuUgv/0bV6g7O/NyP1T9+",
- "OA1ZAc1AkhVVq6NJTMoIj1cz2pgjZhqigk/mwVRH9RLvanl7lpZRTYOlOXjjYolFPfZDpgcyorv8hP+h",
- "OTGfzdk2rN8Oe0QukIEpe5ydkyEz2r5VEOxMpgFaIQQprIJPjNZ9EJQvmsnj+zRqj761NgW3Q24RuENi",
- "c+fH4LnYxGB4Lja9IyA2oO6CPsw4KEZqKNQI+F46yATuv0MflZJu+0jGsccg2SzQiK4KTwMPb3wzS2Oc",
- "PZ0LeTPu02ErnDQmZ0LNqAHznXaQhE2rMnGkGDFb2QadgRov326m0R0+hrEWFs41/QOwoMyod4GF9kB3",
- "jQVRlCyHOyD9VZTpz6mCJ4/J+d9Ov3r0+NfHX31tSLKUYilpQeZbDYrcd7oZUXqbw4P+ylA7qnIdH/3r",
- "p95Q2R43No4SlUyhoGV/KGsAtSKQbUZMuz7W2mjGVdcAjjmcF2A4uUU7sbZ9A9pLpoyEVczvZDOGEJY1",
- "s2TEQZLBXmI6dHnNNNtwiXIrq7tQZUFKISP2NTxiWqQiT9YgFRMRb8ob14K4Fl68Lbu/W2jJFVXEzI2m",
- "34qjQBGhLL3h4/m+Hfpiwxvc7OT8dr2R1bl5x+xLG/nekqhICTLRG04ymFfLlia0kKIglGTYEe/o70Gj",
- "KHDBCjjXtCh/WizuRlUUOFBEZWMFKDMTsS2MXK8gFdxGQuzRztyoY9DTRYw30elhABxGzrc8RTvjXRzb",
- "YcW1YBydHmrL00CLNTDmkC1bZHl7bXUIHXaqeyoCjkHHK/yMho6XkGv6nZAXjSXweymq8s6FvO6cY5dD",
- "3WKcKSUzfb0Ozfgyb0ffLA3sR7E1fpYFvfDH160BoUeKfMWWKx2oFW+kEIu7hzE2SwxQ/GCVstz06atm",
- "r0VmmImu1B2IYM1gDYczdBvyNToXlSaUcJEBbn6l4sLZQLwGOorRv61DeU+vrJ41B0NdKa3MaquSoPe2",
- "d180HROa2hOaIGrUgO+qdjraVnY6GwuQS6DZlswBOBFz5yByritcJEXXs/bijRMNI/yiBVcpRQpKQZY4",
- "w9Re0Hw7e3XoHXhCwBHgehaiBFlQeWtgL9d74byEbYKBEorc/+EX9eAzwKuFpvkexGKbGHprNd95AftQ",
- "j5t+F8F1Jw/Jjkog/l4hWqA0m4OGIRQehJPB/etC1NvF26NlDRL9cX8oxftJbkdANah/ML3fFtqqHAj/",
- "c+qtkfDMhnHKhResYoPlVOlkH1s2jVo6uFlBwAljnBgHHhC8XlGlrQ+Z8QxNX/Y6wXmsEGamGAZ4UA0x",
- "I//iNZD+2Km5B7mqVK2OqKoshdSQxdbAYbNjrtewqecSi2DsWufRglQK9o08hKVgfIcsuxKLIKprV4sL",
- "sugvDh0S5p7fRlHZAqJBxC5Azn2rALthCNQAIEw1iLaEw1SHcuq4q+lEaVGWhlvopOJ1vyE0ndvWp/rn",
- "pm2fuKhu7u1MgMLIK9feQX5lMWuD31ZUEQcHKeilkT3QDGKd3X2YzWFMFOMpJLsoH1U80yo8AnsPaVUu",
- "Jc0gySCn2/6gP9vPxH7eNQDueKPuCg2JjWKKb3pDyT5oZMfQAsdTMeGR4BeSmiNoVIGGQFzvPSNngGPH",
- "mJOjo3v1UDhXdIv8eLhsu9WREfE2XAttdtzRA4LsOPoYgAfwUA99c1Rg56TRPbtT/DcoN0EtRxw+yRbU",
- "0BKa8Q9awIAN1QWIB+elw947HDjKNgfZ2B4+MnRkBwy6b6jULGUl6jo/wPbOVb/uBFE3I8lAU5ZDRoIP",
- "Vg0sw/7Ext90x7yZKjjK9tYHv2d8iywnZwpFnjbwl7BFnfuNDewMTB13octGRjX3E+UEAfXhYkYED5vA",
- "hqY63xpBTa9gS65AAlHVvGBa24DttqqrRZmEA0T9GjtmdE48GxTpd2CMV/EchwqW19+K6cTqBLvhu+go",
- "Bi10OF2gFCIfYSHrISMKwah4D1IKs+vMxY776GFPSS0gHdNGD259/d9TLTTjCsh/i4qklKPKVWmoZRoh",
- "UVBAAdLMYESwek4X2dFgCHIowGqS+OXhw+7CHz50e84UWcCVf3BhGnbR8fAh2nHeCKVbh+sO7KHmuJ1F",
- "rg90+JiLz2khXZ6yP7LAjTxmJ990Bq+9ROZMKeUI1yz/1gygczI3Y9Ye0si4qAocd5QvJxg6tm7c93NW",
- "VDnVd+G1gjXNE7EGKVkGezm5m5gJ/u2a5j/V3fbodE0UGCsKyBjVkG9JKSEFG51vRDVVj31EbNxeuqJ8",
- "iRK6FNXSBY7ZcZDDVsraQmTFe0NEpRi94QlalWMc1wUL+wcaRn4BanSorknaagxXtJ7PvckZcxX6nYuY",
- "6KNeqelkUMU0SF03KqZFTvuVyQju2xKwAvw0E4/0XSDqjLDRx1e4LYZ6zeb+MTbyZugYlP2Jg1C25uNQ",
- "NJvRb/PtHUgZdiAioZSg8E4I7ULKfhWL8EWZuzTUVmko+qZz2/XXgeP3dlBBEzxnHJJCcNhGH1EzDj/i",
- "x+hxwntpoDNKCEN9u0J/C/4OWO15xlDjbfGLu909oV0XkfpOyLvyQdoBR8vTI1x+e/3bbsqbOiZpnkd8",
- "ee69SZcBqGn9vp1JQpUSKUMh6SxTU3vQnPvPPU5po/9NHUV7B2evO27HaRU+ZUSjLOQloSTNGZpsBVda",
- "Vql+zykahYKlRqKNvPY7bCZ84ZvE7ZIRs6Eb6j2nGGlWm4qiERILiNhFvgPw1kJVLZegdEe5WAC8564V",
- "46TiTONchTkuiT0vJUgM+TmyLQu6JQtDE1qQ30EKMq90W9zG51RKszx3HjQzDRGL95xqkgNVmvzI+MUG",
- "h/Nedn9kOegrIS9rLMRv9yVwUEwl8aio7+1XDFh1y1+54FV8/m4/W5+LGb95c7VFm1HzpPv/3f+vk3en",
- "yT9o8vtx8ux/zT58fHr94GHvx8fX33zz/9s/Pbn+5sF//Wdspzzsscc+DvKzl04VPXuJ+kbjdOnB/skM",
- "7gXjSZTIwvCJDm2R+/iw1RHQg7Y1Sq/gPdcbbghpTXOWGd5yE3Lo3jC9s2hPR4dqWhvRsT75tR4oxd+C",
- "y5AIk+mwxhtLUf1AwvizOvQCupdyeF4WFbdb6aVv+2rEB3SJxbR+OmmzqpwQfFe3oj4a0f35+KuvJ9Pm",
- "PVz9fTKduK8fIpTMsk3s1WMGm5hy5g4IHox7ipR0q0DHuQfCHo1ds8EU4bAFGK1erVj56TmF0mwe53A+",
- "Ft8ZeTb8jNsgeXN+0Ke4da4Ksfj0cGsJkEGpV7FsCy1BDVs1uwnQifMopVgDnxJ2BEddI0tm9EUXRZcD",
- "XeCrf9Q+xRhtqD4HltA8VQRYDxcyypIRox8UeRy3vp5O3OWv7lwdcgPH4OrOWTsQ/d9akHvff3tBZo5h",
- "qnv2Aa4dOngyGVGl3augVgSQ4WY2x4wV8t7z9/wlLBhn5vvJe55RTWdzqliqZpUC+ZzmlKdwtBTkxD80",
- "ekk1fc97ktZgGqjgiRcpq3nOUnIZKiQNedrUHv0R3r9/R/OleP/+Qy8Yoq8+uKmi/MVOkBhBWFQ6cYkJ",
- "EglXVMacTap+mI4j28wju2a1QraorEXSJz5w48d5Hi1L1X2g2l9+WeZm+QEZKvf80mwZUVpIL4sYAcVC",
- "g/v7WriLQdIrb1epFCjyW0HLd4zrDyR5Xx0fPwHSerH5m7vyDU1uSxhtXRl8QNs1quDCrVoJGy1pUtJl",
- "zKf1/v07DbTE3Ud5uUAbR54T7NZ6Keoj4XGoZgEeH8MbYOE4+NUbLu7c9vJJqOJLwE+4hdjGiBuNp/2m",
- "+xW8Hb3xdnXen/Z2qdKrxJzt6KqUIXG/M3VumqURsnz4g2JL1FZdGp85kHQF6aXLrwJFqbfTVncfYeME",
- "Tc86mLKZd+zLL8z9gB6BOZCqzKgTxSnfdh/hK9Dax/G+hUvYXogmdcQhr+7bj8DV0EFFSg2kS0Os4bF1",
- "Y3Q334VxoWJflv4tNT6q82RxUtOF7zN8kK3IeweHOEYUrUfKQ4igMoIIS/wDKLjBQs14tyL92PKMljG3",
- "N18kC4/n/cQ1aZQnF3EVrgat7vZ7AZjGS1wpMqdGbhcuA5V96BxwsUrRJQxIyKFTZuRz4pYjBwfZd+9F",
- "bzqx6F5ovfsmCrJtnJg1RykFzBdDKqjMdOLs/EzW7+c8E5hY0iFsnqOYVAckWqZDZcs5ZjPlDYEWJ2CQ",
- "vBE4PBhtjISSzYoqnxwLc4j5szxKBvgDH+7vStdyFoSIBYnC6mQsnud2z2lPu3RJW3ymFp+eJVQtR6Ra",
- "MRI+RqXHtkNwFIAyyGFpF24be0Jpkgg0G2Tg+GmxyBkHksSizQIzaHDNuDnAyMcPCbEWeDJ6hBgZB2Cj",
- "PxsHJq9FeDb58hAguUuCQP3Y6AkP/ob4ey0bf21EHlEaFs4GvFqp5wDUhSjW91cnUBaHIYxPiWFza5ob",
- "Nuc0vmaQXtYQFFs7OUJcRMWDIXF2hwPEXiwHrcleRTdZTSgzeaDjAt0OiOdik9gHm1GJd76ZG3qPhqTj",
- "89HYwbT5We4pMhcbjNLBq8WGQO+BZRgOD0ag4W+YQnrFfkO3uQVm17S7pakYFSokGWfOq8llSJwYM/WA",
- "BDNELveDlCs3AqBj7GjyFzvld6+S2hZP+pd5c6tNm1Ri/rVP7PgPHaHoLg3gr2+FqZOkvOlKLFE7RTvY",
- "pJ0fJhAhY0Rv2ETfSdN3BSnIAZWCpCVEJZcxz6nRbQBvnHPfLTBeYBYayrcPgggmCUumNDRGdB8n8TnM",
- "kxST3wmxGF6dLuXCrO+tEPU1Zd2I2LG1zE++AgwBXjCpdIIeiOgSTKPvFCrV35mmcVmpHSNlU8WyLM4b",
- "cNpL2CYZy6s4vbp5f3hppn1ds0RVzZHfMm4DVuaY2jgaObljahtcu3PBr+yCX9E7W++402CamomlIZf2",
- "HH+Sc9HhvLvYQYQAY8TR37VBlO5gkMGL1z53DOSmwMd/tMv62jtMmR97b9SOf3c7dEfZkaJrCQwGO1fB",
- "0E1kxBKmg8zA/aeoA2eAliXLNh1bqB11UGOmBxk8fD61DhZwd91gezAQ2D1jr2EkqHbqvEbAtzmeW5lr",
- "jkZh5qKd4C5kCOFUTPkKBX1E1a/l9uHqAmj+A2x/MW1xOZPr6eR2ptMYrt2Ie3D9pt7eKJ7RNW9NaS1P",
- "yIEop2UpxZrmiTMwD5GmFGtHmtjc26M/MauLmzEvvj199caBfz2dpDlQmdSiwuCqsF35p1mVzdI3cEB8",
- "BnSj83mZ3YqSwebXqcVCo/TVClwq6UAa7eW8bBwOwVF0RupFPEJor8nZ+UbsEnf4SKCsXSSN+c56SNpe",
- "EbqmLPd2Mw/tQDQPLm5c4tQoVwgHuLV3JXCSJXfKbnqnO346Guraw5PCuXYkuy5sPndFBO+60DHmeVs6",
- "r3tBMWOltYr0mROvCrQkJCpnadzGyufKEAe3vjPTmGDjAWHUjFixAVcsr1gwlmk2JidNB8hgjigyVTQt",
- "ToO7uXC1eirO/lUBYRlwbT5JPJWdg4rpTZy1vX+dGtmhP5cb2From+FvI2OE2Vq7Nx4CsVvACD11PXBf",
- "1iqzX2htkTI/BC6JAxz+4Yy9K3GHs97Rh6NmG7y4anvcwtI6ff5nCMPmWN9f18crry5t7MAc0To9TCUL",
- "KX6HuJ6H6nHkoZHPT8swyuV3CB86hNUpWiymtu405Yaa2Qe3e0i6Ca1Q7SCFAarHnQ/ccpgo01uoKbdb",
- "bctmtGLd4gQTRpXO7PgNwTiYe5G4Ob2a01gWUSNkGJhOGwdwy5auBfGdPe5V/drCzk4CX3LdltlH5CXI",
- "5g1gPyHNDQUGO+1oUaGRDJBqQ5lgav1/uRKRYSp+RbmtvmL62aPkeiuwxi/T60pITAGh4mb/DFJW0Dwu",
- "OWRp38SbsSWzhUUqBUHlCjeQLdpkqchV/6jfEDnUnC3I8TQon+N2I2Nrptg8B2zxyLaYU4WcvDZE1V3M",
- "8oDrlcLmj0c0X1U8k5DplbKIVYLUQh2qN7Xzag76CoCTY2z36Bm5j247xdbwwGDR3c+Tk0fP0Ohq/ziO",
- "XQCuMMwubpIhO/m7YydxOka/pR3DMG436lH0tbytDDfMuHacJtt1zFnClo7X7T9LBeV0CfFIkWIPTLYv",
- "7iYa0jp44Zkta6S0FFvCdHx+0NTwp4Hoc8P+LBgkFUXBdOGcO0oUhp6ashR2Uj+crZHkMgp7uPxH9JGW",
- "3kXUUSI/rdHU3m+xVaMn+zUtoI3WKaE270fOmugFn+ecnPm0Qphiuc6sbHFj5jJLRzEHgxkWpJSMa1Qs",
- "Kr1I/krSFZU0NezvaAjcZP7100ha6XZ6U34Y4J8c7xIUyHUc9XKA7L0M4fqS+1zwpDAcJXvQvPYITuWg",
- "MzfuthvyHe4eeqxQZkZJBsmtapEbDTj1rQiP7xjwlqRYr+cgejx4ZZ+cMisZJw9amR36+e0rJ2UUQsZy",
- "BTbH3UkcErRksMbYvfgmmTFvuRcyH7ULt4H+83oevMgZiGX+LMcUgeciop36VOe1Jd3FqkesA0PH1Hww",
- "ZDB3Q01JO630p+ejdxMFFfd0ecN237Flvng84B9dRHxmcsENbHz5diUDhBKk1Y+STFZ/D3zslDwXm7GE",
- "0zmFnnj+DVAURUnF8uyX5uVnp2qBpDxdRX1mc9Px16a+Wr04ewdG0/6tKOeQR4ez8uavXi6NSM7/FGPn",
- "KRgf2bZbSMEut7O4BvA2mB4oP6FBL9O5mSDEavtRXR20nS9FRnCeJsdcc1z7BTiCNOn/qkDp2AMl/GAD",
- "x9A2atiBzdJNgGeokR6R720J5RWQVgIh1AR9poj2q+mqzAXNppjB4uLb01fEzmr72CpBNkv4EhWh9io6",
- "NrEgfea4EGRf8Cf+PGL8OLvjtc2qlU7qpN6xB6imRZN2nHX8BKgihdg5Ii+DYqj2raoZwtDDgsnCaHX1",
- "aFY+Qpow/9GapitU+1qsdZjkx6e391SpgpKSdWmoOqcknjsDt8twbxPcT4kwuvkVU7ZyLqyh/ea1fgDu",
- "zA7+DWx7ebLi3FLK0QG3XJ1B8lC0e+DsFeldCVHIOog/UOi31SEOzfZ/jr2iKa66pQN6tSTtC8q65I+v",
- "iJ5SLjhLMcFU7Ip2JXbH+NlG5OLqGnL9EXcnNHK4ogUL6lA8h8XBEgaeETrE9Q39wVezqZY67J8aa7mu",
- "qCZL0MpxNsimvu6GszUyrsDlCMWCzAGfFLLlu0QOGXWHJ7Xb5EAywqc3A8rjd+bba2dawJj0S8ZRiXBo",
- "c4KftQZiBVBtNA+myVKAcutpvz9W70yfI3yKm8Hmw5GvGIpjWNefWbb1c/eHOvVeb+dlNm1fmLYuQVL9",
- "cyvK2U56WpZu0uGqLFF5QG/4IIIj3svEu48C5Nbjh6PtILed4Sp4nxpCgzU6u6HEe7hHGHWFkk71KyO0",
- "WorCFsSGiUWzJDAeAeMV49DUs41cEGn0SsCNwfM60E+lkmorAo7iaRdAc/Rwxxia0s69cduhuumhDEpw",
- "jX6O4W1siqsMMI66QSO4Ub6ty+ga6g6EiRdYv9shsl8qBaUqJ0Rl+GqhUzwlxjgM4/blmdoXQP8Y9GUi",
- "211Lak/OITfR0EPUeZUtQSc0y2IpW5/jV4JfSVah5AAbSKs6tWdZkhTzrrQT0fSpzU2UCq6qYsdcvsEt",
- "pwuqEUWoIayI5HcYH7rMt/hvLK/l8M64QI+DQw19VEd2WPalfuhkTOo1NJ0otkzGYwLvlNujo5n6ZoTe",
- "9L9TSs/Fsg3IJ04/sYvLhXsU42/fmosjzM7QS9Zqr5Y6eQIG9glfQxLVxvrZb5sr4VXWy96KDqW6Rt1u",
- "A8RwtbkpXn4D4b1B0g1q71froRwK8k0HY9Kpdq/jNCU7WdDgiyMbIWTfFiEUcevsUFSQDQoyn3u9x0mG",
- "PTlbxxMfBgj14WZ9gH7wsaykpMy53xtm0cesi3rvv0MYEw/bbHB3ES6WfNBi98N6KO7bJ2PD791qVJfg",
- "nsyXEtZMVN6x7SOfvEpof23Vdqoj76Pr7xtecarPaw4dNN5euKoAdplOJ//hFxsnR4Bruf03MOX2Nr1X",
- "56ov7VrzVNOE1AmlRyWYbt2KYxIVxnLiOdmwVWlrT52wHlm9HCMO9Ot+TSdn2UEXZiyv4sSOEjt28Spe",
- "w2mnmlRTeMRKoViT1z1W3mtkiOEFVugK0mb1x/LxPWtINSbzb+IWJMAhSbTMZEHB0C/ppwbU6ToS02Wd",
- "2pVqqp/Bf88d33sNFrxotNnPj8YnVjqto9OQT2M25CVwV7Oz/c5jdLT5YgGpZus9r+/+vgIevOyaeruM",
- "rb0dPMZjdfQyJm853OrYALTrcdxOeIIkircGZ+jtzSVs7ynSooZoOvapv2pvkrcDMYDcITEkIlQs+sMa",
- "kp1DnqmaMhALPtrKdocmA9pgJafgLekN5/IkaS6O5n3pjinjpWRGzWW6HvTqGgNxhx7o9StRDOsfL7Hw",
- "h6qrLPq8H6GWTs762RGvXN4QfCtZ+058BhFQ/jf/MNrOkrNLCGtNoafqisrMt4iaXrxVJ9lxH/Ve1fkq",
- "Cl2gF/XMrImN7b+jiuTbwgjoNBdGjEiGwsjb4ah1LMc9ZYNubPp3DLQ1cC1Aupp8KP/mQkGihY+l3QXH",
- "LlTYyKIbIUEN5ri0wA1mnnnbpNbBXL8UM81QF1AULpBIKKiBTgYJcIbn3IXsF/a7fzjkc73utTDV9Lq/",
- "6ICPimaqh8SQ6hfE3Zb7HyTdxNjEOLd1n1UsGw4H2faGlFJkVWov6PBg1Aa50bmmdrCSqJ0m7a+yoyME",
- "rzovYTuzSpCv1uB3MATaSk4W9CCLQmeT79T8pmJwL+8EvM9puZpOSiHyZMDZcdZP4dOl+EuWXkJGzE3h",
- "owcHKt+Q+2hjr73ZV6utT1lTlsAhe3BEyCm38dresd3OId2ZnN/Tu+bf4KxZZbNqOaPa0XseD3zFfFfy",
- "ltzMD7ObhykwrO6WU9lB9iSI2QykD5L0KlIH6misVt53NXdr8zREZaGIySRN2Zk9cTJ1iExT+aMJk+lL",
- "B3kurhKkoqTO/xXTOUy7NpP0GU+bbgbbcwjibahyF+iWrGhGUiElpGGP+BMHC1QhJCS5wPCbmGdwoY08",
- "VGBcMye5WBJRGjXXptHzPpRoWZpgLvvM1vZMrKNmIJEBKPes1k1jG/fn2VG95vDKOBeriL0FEe2xfHD5",
- "G0coB1etCMAcQaD7bU2nseo+7XV160MNVWvTomBpHN1/riiTwdiQPbWLIuurydGVVvKvAgdwFXXZ7vaQ",
- "2jp087F+0jpn8shjEQAw7DltwTDKf3ooGAus65jQCJLPaql12iq7yzpn3+ezszSeUqu1roCYsSsJ7pWa",
- "LUDXqZxTUr3yt5hp3tctjZ4CCp+Q2fIfVFlLiLfIuOp3XfFAlEkOa2g5lN3TuSpNQSm2hrBynu1MMoAS",
- "7ZNdqTnmKQ25XEeUcmtPAl/bGOxGZSuLWLtTZI/gFBXzNjyxx0SNPUoGojXLKtrCn7pFLbKhMmQRNuxh",
- "HckpDmYS8cXtYhF7YxuQ5qPnksdDG8KXm7VRBGfLauOpJcLmZKuSXvFhJSJid6r97bdfB8HBiOq8pB68",
- "8mW9KzdVIAcpYxdh9OoHRmUOBb7+a5j0xItbrm9ExrKmLqYiAzDVnGeM3oMmOixoVtAtydhiAdIa85Wm",
- "PKMyC5szTlKQmjKj2WzVzcVaA62sYLpXsjXcFQf1DCYm46JdygKSb53KcAupEz03EYnTXrVaDJVI7O1K",
- "/DkB3RjpGuOqBojAPYRG2doeMMFRQCIFvYQD51Hsd9g9DaYncbY/LXDWMVPEfK03zK02inX3wxAit1tQ",
- "DHG3ZyhMvdi86ZI2mgUtyf6C7NL4j83FOa4so++wB7zQYRgUZvS2GwfOZ34c9WONlGApH4YoobX8fT5I",
- "t8BG0gi2yDECrcEmwrUB9e19CRzM6kXttx2qIdp172KeRcFtkb+eW9jyJlu1LyAccxbkmuaf3rWLCThP",
- "ER+QvR02Boe+wRDJFpXqZi8TXtFRcwd+wLubmr9BV/TfwexRVCt1QzkRphbrfTAP3iw0t4aLhS/htQZO",
- "rnBMG8f26Gsydy+3SwkpU13R6MpX16hdYVhsyr0G2eg9vrd96/xF6FuQ8cJrGuR1k6kfdfwlbyBsjuhn",
- "ZioDJzdK5THq65FFBH8xHhWmUNtzXVy2Atxs5ZPOyw0h4Y4D3YKQ9QMD3frJ4cYuzwZzmUunUtBf5+jb",
- "uoXbyEXdrG1slGYfubvSuY8JroxXaTDdMbrTIgRLnBAElfz26DciYYE1DAV5+BAnePhw6pr+9rj92Rzn",
- "hw+j0tkni+u0OHJjuHljFPPL0Es/+5pt4FFpZz8qlmf7CKP1RLipAoqPYH91iQg+Sx3SX22sSf+oulpw",
- "twiQs4iJrLU1eTBV8Ph3xLtf1y3yyhf9OGklmd5ifkRvP2C/RiNQv6+jmVw0XK0furtPi0uoM2w2sU+V",
- "8rfr94LmeB9ZtZWbW0jkR+TbDS3KHNxB+ebe/C/w5K9Ps+Mnj/4y/+vxV8cpPP3q2fExffaUPnr25BE8",
- "/utXT4/h0eLrZ/PH2eOnj+dPHz/9+qtn6ZOnj+ZPv372l3uGDxmQLaATn41n8n+xWG9y+uYsuTDANjih",
- "JfsBtrYuoCFjX3GQpngSoaAsn5z4n/63P2FHqSia4f2vE5fsY7LSulQns9nV1dVR2GW2xGCHRIsqXc38",
- "PL2ShKdvzmovkbUC4Y7ad7LeuudJ4RS/vf32/IKcvjk7CurVn0yOj46PHmF58xI4LdnkZPIEf8LTs8J9",
- "nzlim5x8vJ5OZiugOcYGmj8K0JKl/pMEmm3d/9UVXS5BHrkyjOan9eOZFytmH13Qx/Wub7OwosnsYys2",
- "JtvTEysezD76RH67W7cy5bmYoKDDSCh2NZvNMT/I2KaggsbDS0FlQ80+org8+PvMJTSIf0S1xZ6HmQ8g",
- "i7dsYemj3hhYOz1SqtNVVc4+4n+QPgOw7POhmd7wGdo+Zh9bq3Gfe6tp/950D1usC5GBB1gsFjYx6a7P",
- "s4/232Ai2JQgmRH8MGTP/WpDq2e2GH7/5y1Poz/219GrCha1I721uQwoVq6O1yaY4Hm1R/0sQw6suyGu",
- "tsSItT3iMX58fHxQtdRxATPdwNr+ndZnXrtWdj2dPD0Q0J3Wn9ZzpAgwz2lGvJMe53706eY+4xgna7gy",
- "sbcOQvD000HQrufyA2zJa6HJd6geXU8nX33KnTjjRlijOcGWQbrG/hH5mV9yccV9SyOuVEVB5Xb08dF0",
- "qTCYQ7I1dcJiUOJr8gGjh2zgRvuonWZZj+it2AZKPxd4/w1hrFDL0j0+bpDWSK2MmyX01d5+zfQVRGLU",
- "bSSl9z5ykcEklCe1rOD6ljyhLbgbEM4iVhw0R2LRrYVPsBqAGg247sbY2JFHVY3uDF4XyanmBVNeXfjC",
- "U77wFGmnf/Lppj8HuWYpkAsoSiGpZPmW/Mzr1DE35nGnWRZ9pdI++nt53HSySVKRwRJ44hhYMhfZ1qfg",
- "bk1wCVZB7Qkys4/tOjpWpJtkkIOORuCb3+sK5/1FzLfk7GVPwrHdupz3+RabBvVpTt59tBqeUV8aBawL",
- "Yo8zhqVRurzpQ5xr7iJ7s5Cl0MRiIXOL+sKIvjCiWwk3ow/PGPkmqn3YxGy0d2dPfY61WAZPqvugjNFR",
- "PuvxvZON7+s/MX3HvvaBjAQfbBBKF81fWMQXFnE7FvE9RA4jnlrHNCJEd5g+NJZhYARf1q1WiU4O37zK",
- "qQxij/aZOU5xRGfc+BRc41MrdVFcWZ2O8qagb2QD71bP+8LyvrC8Pw/LO93PaNqCya01o0vYFrSs9SG1",
- "qnQmrgI/B8JiY5D6duC6fn7r79kVZTpZCOnejmM1l35nDTSfuUSRnV+b3Ey9L5hwKvgxjIGO/jqri2VF",
- "P3ZdJLGvzkUw0MiHZPrPjbs0dD8ia68dj+8+GLaMpRgc12+8aSezGb7HXAmlZ5Pr6ceOpy38+KEmgY/1",
- "XeFI4frD9f8EAAD//4XxG93x1QAA",
+ "H4sIAAAAAAAC/+y9e5PbNrIo/lVQOqfKiX/ijF/Jbly1dX4TO8nOjZO47En2nmP7ZiGyJWGHArgAOCPF",
+ "19/9FroBEiRBiZqZ2LtV+cseEY9Go9HobvTj/SxXm0pJkNbMnr6fVVzzDVjQ+BfPc1VLm4nC/VWAybWo",
+ "rFBy9jR8Y8ZqIVez+Uy4Xytu17P5TPINtG1c//lMwz9roaGYPbW6hvnM5GvYcDew3VWudTPSNlupzA9x",
+ "RkOcP5992POBF4UGY4ZQ/iTLHRMyL+sCmNVcGp67T4ZdC7tmdi0M852ZkExJYGrJ7LrTmC0FlIU5CYv8",
+ "Zw16F63STz6+pA8tiJlWJQzhfKY2CyEhQAUNUM2GMKtYAUtstOaWuRkcrKGhVcwA1/maLZU+ACoBEcML",
+ "st7Mnr6ZGZAFaNytHMQV/nepAX6DzHK9Ajt7N08tbmlBZ1ZsEks799jXYOrSGoZtcY0rcQWSuV4n7Ifa",
+ "WLYAxiV79e0z9vjx46/cQjbcWig8kY2uqp09XhN1nz2dFdxC+DykNV6ulOayyJr2r759hvO/9guc2oob",
+ "A+nDcua+sPPnYwsIHRMkJKSFFe5Dh/pdj8ShaH9ewFJpmLgn1PhONyWe/5PuSs5tvq6UkDaxLwy/Mvqc",
+ "5GFR9308rAGg075ymNJu0DcPsq/evX84f/jgw3+8Ocv+x//5xeMPE5f/rBn3AAaSDfNaa5D5Lltp4Hha",
+ "1lwO8fHK04NZq7os2Jpf4ebzDbJ635e5vsQ6r3hZOzoRuVZn5UoZxj0ZFbDkdWlZmJjVsnRsyo3mqZ0J",
+ "wyqtrkQBxdxx3+u1yNcs54aGwHbsWpSlo8HaQDFGa+nV7TlMH2KUOLhuhA9c0L8uMtp1HcAEbJEbZHmp",
+ "DGRWHbiewo3DZcHiC6W9q8xxlxW7WAPDyd0HumwRd9LRdFnumMV9LRg3jLNwNc2ZWLKdqtk1bk4pLrG/",
+ "X43D2oY5pOHmdO5Rd3jH0DdARgJ5C6VK4BKRF87dEGVyKVa1BsOu12DX/s7TYColDTC1+Afk1m37/3r9",
+ "049MafYDGMNX8JLnlwxkrgooTtj5kkllI9LwtIQ4dD3H1uHhSl3y/zDK0cTGrCqeX6Zv9FJsRGJVP/Ct",
+ "2NQbJuvNArTb0nCFWMU02FrLMYBoxAOkuOHb4aQXupY57n87bUeWc9QmTFXyHSJsw7d/eTD34BjGy5JV",
+ "IAshV8xu5agc5+Y+DF6mVS2LCWKOdXsaXaymglwsBRSsGWUPJH6aQ/AIeRw8rfAVgRMGGQWnmeUAOBK2",
+ "CZpxp9t9YRVfQUQyJ+xnz9zwq1WXIBtCZ4sdfqo0XAlVm6bTCIw49X4JXCoLWaVhKRI09tqjwzEYauM5",
+ "8MbLQLmSlgsJhWPOCLSyQMxqFKZowv36zvAWX3ADXz4Zu+PbrxN3f6n6u753xyftNjbK6Egmrk731R/Y",
+ "tGTV6T9BP4znNmKV0c+DjRSrC3fbLEWJN9E/3P4FNNQGmUAHEeFuMmIlua01PH0r77u/WMZeWy4Lrgv3",
+ "y4Z++qEurXgtVu6nkn56oVYify1WI8hsYE0qXNhtQ/+48dLs2G6TesULpS7rKl5Q3lFcFzt2/nxsk2nM",
+ "YwnzrNF2Y8XjYhuUkWN72G2zkSNAjuKu4q7hJew0OGh5vsR/tkukJ77Uv7l/qqp0vW21TKHW0bG/ktF8",
+ "4M0KZ1VVipw7JL7yn91XxwSAFAnetjjFC/Xp+wjESqsKtBU0KK+qrFQ5LzNjucWR/lPDcvZ09h+nrf3l",
+ "lLqb02jyF67Xa+zkRFYSgzJeVUeM8dKJPmYPs3AMGj8hmyC2h0KTkLSJjpSEY8ElXHFpT1qVpcMPmgP8",
+ "xs/U4pukHcJ3TwUbRTijhgswJAFTw3uGRahniFaGaEWBdFWqRfPDZ2dV1WIQv59VFeEDpUcQKJjBVhhr",
+ "Psfl8/YkxfOcPz9h38VjoyiuZLlzlwOJGu5uWPpby99ijW3Jr6Ed8Z5huJ1Kn7itCWhwYv5dUByqFWtV",
+ "OqnnIK24xn/1bWMyc79P6vzvQWIxbseJCxUtjznScfCXSLn5rEc5Q8Lx5p4TdtbvezOycaOkCeZGtLJ3",
+ "P2ncPXhsUHiteUUA+i90lwqJSho1IlhvyU0nMrokzNEZjmgNobrxWTt4HpKQICn0YPi6VPnlX7lZ38GZ",
+ "X4SxhscPp2Fr4AVotuZmfTJLSRnx8WpHm3LEXENU8NkimuqkWeJdLe/A0gpuebQ0D29aLCHUYz9keqAT",
+ "ustP+B9eMvfZnW3H+mnYE3aBDMzQcfaPDIXT9klBoJlcA7RCKLYhBZ85rfsoKJ+1k6f3adIefUM2Bb9D",
+ "fhG4Q2p758fga7VNwfC12g6OgNqCuQv6cOOgGGlhYybA99xDpnD/Pfq41nw3RDKOPQXJboFOdDV4GmR8",
+ "47tZWuPs2ULpm3GfHluRrDU5M+5GjZjvvIckbFpXmSfFhNmKGvQGal/59jON/vApjHWw8Nry3wELxo16",
+ "F1joDnTXWFCbSpRwB6S/TjL9BTfw+BF7/dezLx4++vXRF186kqy0Wmm+YYudBcM+87oZM3ZXwufDlaF2",
+ "VJc2PfqXT4Khsjtuahyjap3DhlfDocgASiIQNWOu3RBrXTTjqhsApxzOC3CcnNDOyLbvQHsujJOwNos7",
+ "2YwxhBXtLAXzkBRwkJiOXV47zS5eot7p+i5UWdBa6YR9DY+YVbkqsyvQRqjEa8pL34L5FkG8rfq/E7Ts",
+ "mhvm5kbTby1RoEhQlt3K6Xyfhr7YyhY3ezk/rTexOj/vlH3pIj9YEg2rQGd2K1kBi3rV0YSWWm0YZwV2",
+ "xDv6O7AoClyIDby2fFP9tFzejaqocKCEyiY2YNxMjFo4ud5AriR5QhzQzvyoU9DTR0ww0dlxADxGXu9k",
+ "jnbGuzi244rrRkh89DA7mUdarIOxhGLVIcvba6tj6KCp7pkEOA4dL/AzGjqeQ2n5t0pftJbA77SqqzsX",
+ "8vpzTl0O94vxppTC9Q06tJCrsut9s3Kwn6TW+EkW9CwcX78GhB4p8oVYrW2kVrzUSi3vHsbULClA8QMp",
+ "ZaXrM1TNflSFYya2NncggrWDtRzO0W3M1/hC1ZZxJlUBuPm1SQtnI/4a+FCM79s2lvfsmvSsBTjqynnt",
+ "VltXDF9vB/dF2zHjOZ3QDFFjRt6umkdHakXTkS9AqYEXO7YAkEwt/AORf7rCRXJ8erZBvPGiYYJfdOCq",
+ "tMrBGCgyb5g6CFpoR1eH3YMnBBwBbmZhRrEl17cG9vLqIJyXsMvQUcKwz77/xXz+CeC1yvLyAGKxTQq9",
+ "jZrvXwGHUE+bfh/B9SePyY5rYOFeYVahNFuChTEUHoWT0f3rQzTYxduj5Qo0vsf9rhQfJrkdATWg/s70",
+ "flto62rE/c+rt07CcxsmuVRBsEoNVnJjs0Ns2TXq6OBuBREnTHFiHHhE8HrBjaU3ZCELNH3RdYLzkBDm",
+ "phgHeFQNcSP/EjSQ4di5uwelqU2jjpi6qpS2UKTWIGG7Z64fYdvMpZbR2I3OYxWrDRwaeQxL0fgeWbQS",
+ "QhC3zVOLd7IYLg4fJNw9v0uisgNEi4h9gLwOrSLsxi5QI4AI0yKaCEeYHuU0flfzmbGqqhy3sFktm35j",
+ "aHpNrc/sz23bIXFx297bhQKDnle+vYf8mjBLzm9rbpiHg234pZM90AxCj91DmN1hzIyQOWT7KB9VPNcq",
+ "PgIHD2ldrTQvICug5LvhoD/TZ0af9w2AO96qu8pCRl5M6U1vKTk4jewZWuF4JiU8MvzCcncEnSrQEojv",
+ "fWDkAnDsFHPydHSvGQrnSm5RGA+XTVudGBFvwytl3Y57ekCQPUefAvAIHpqhb44K7Jy1umd/iv8G4ydo",
+ "5IjjJ9mBGVtCO/5RCxixoXoH8ei89Nh7jwMn2eYoGzvAR8aO7IhB9yXXVuSiQl3ne9jduerXnyD5zMgK",
+ "sFyUULDoA6mBVdyfkf9Nf8ybqYKTbG9D8AfGt8RySmFQ5OkCfwk71LlfkmNnZOq4C102Maq7n7hkCGhw",
+ "F3MieNwEtjy35c4JanYNO3YNGpipFxthLTlsd1Vdq6osHiD5rrFnRv+IR06RYQemvCq+xqGi5Q23Yj4j",
+ "nWA/fBc9xaCDDq8LVEqVEyxkA2QkIZjk78Eq5XZdeN/x4D0cKKkDpGfa+ILbXP/3TAfNuAL236pmOZeo",
+ "ctUWGplGaRQUUIB0MzgRrJnTe3a0GIISNkCaJH65f7+/8Pv3/Z4Lw5ZwHQIuXMM+Ou7fRzvOS2Vs53Dd",
+ "gT3UHbfzxPWBDz7u4vNaSJ+nHPYs8CNP2cmXvcGbVyJ3pozxhOuWf2sG0DuZ2ylrj2lkmlcFjjvpLSca",
+ "OrVu3PfXYlOX3N7FqxVc8TJTV6C1KOAgJ/cTCyW/ueLlT003DCaB3NFoDlmOIRATx4IL14eiJg7phq03",
+ "mdhsoBDcQrljlYYcyMvfiXymgfGEkf9fvuZyhZK+VvXKO6DROMipa0M2FV3LwRBJachuZYbW6RTn9k7H",
+ "IdDDyUHAnS7WN22T5nHNm/l8bM+UKzVCXt/Un3zdms9GVVWH1KtWVSXkdKNVJnDxjqAW4aedeOIbCKLO",
+ "CS1DfMXb4k6B29zfx9beDp2Ccjhx5BLXfhzzinN6crm7A2mFBmIaKg0G75bYvmToq1rGkWn+8jE7Y2Ez",
+ "NMFT119Hjt+rUUVPyVJIyDZKwi4ZjC0k/IAfk8cJ77eRzihpjPXtKw8d+HtgdeeZQo23xS/udv+E9p+a",
+ "zLdK39VbJg04WS6f8HR48J3cT3nTB05elok3QR+30mcAZt7EyQvNuDEqFyhsnRdmTgfNPyP6IJcu+l82",
+ "3rh3cPb64/Yev+KQSDTuQlkxzvJSoOlXSWN1ndu3kqNxKVpqwmspaNHj5sZnoUnavpkwP/qh3kqOHmuN",
+ "ySnpabGEhH3lW4BgdTT1agXG9pSUJcBb6VsJyWopLM61ccclo/NSgUbXoRNqueE7tnQ0YRX7DbRii9p2",
+ "xXYMyzJWlKV/iXPTMLV8K7llJXBj2Q9CXmxxuPBaH46sBHut9GWDhfTtvgIJRpgs7V31HX1Fx1e//LV3",
+ "gsUwevpMbzdu/DZ2a4e2pzY0/P989l9P35xl/8Oz3x5kX/1/p+/eP/nw+f3Bj48+/OUv/7f70+MPf/n8",
+ "v/4ztVMB9lTQkIf8/LlXac+fo97SPt4MYP9ohvuNkFmSyGI3jB5tsc8wQNYT0Oddq5Zdw1tpt9IR0hUv",
+ "ReF4y03IoX/DDM4inY4e1XQ2omfFCms9Uhu4BZdhCSbTY403lqKGDonp8Dx8TfQRd3helrWkrQzSN0Wf",
+ "BMcwtZw3IZiUneUpw/i8NQ9ejf7PR198OZu3cXXN99l85r++S1CyKLap6MkCtiklzx8QPBj3DKv4zoBN",
+ "cw+EPekDR04Z8bAb2CxAm7WoPj6nMFYs0hwu+PR7Y9FWnktytnfnB98md/7JQy0/PtxWAxRQ2XUqa0NH",
+ "UMNW7W4C9PxFKq2uQM6ZOIGTvrGmcPqi98YrgS8xewBqn2qKNtScAyK0QBUR1uOFTLKIpOgHRR7PrT/M",
+ "Z/7yN3euDvmBU3D152weIsPfVrF7331zwU49wzT3KJCXho5CLxOqtI8u6ngSOW5GuWpIyHsr38rnsBRS",
+ "uO9P38qCW3664Ebk5rQ2oL/mJZc5nKwUexoClp5zy9/KgaQ1mk4qChVjVb0oRc4uY4WkJU9KETIc4e3b",
+ "N7xcqbdv3w2cKobqg58qyV9ogswJwqq2mU9wkGm45jr1aGWaAHccmTKY7JuVhGxVk2UzJFDw46d5Hq8q",
+ "0w90HS6/qkq3/IgMjQ/jdFvGjFU6yCJOQCFocH9/VP5i0Pw62FVqA4b9fcOrN0Ladyx7Wz948BhYJ/Lz",
+ "7/7KdzS5q2CydWU0ELdvVMGFk1oJW6t5VvFV6m3s7ds3FniFu4/y8gZtHGXJsFsn4jR41ONQ7QICPsY3",
+ "gOA4OnoOF/eaeoVkVukl4CfcQmzjxI32xf6m+xXFoN54u3pxrINdqu06c2c7uSrjSDzsTJPjZuWErOBG",
+ "YcQKtVWfDmgBLF9DfunztMCmsrt5p3vw1PGCZmAdwlAGH4ogwxwS+LKwAFZXBfeiOJe7fjC/AWuDP/Ar",
+ "uITdhWpTUBwTvd8NJjdjBxUpNZIuHbHGx9aP0d987w6Gin1VhZhsDM4LZPG0oYvQZ/wgk8h7B4c4RRSd",
+ "YOcxRHCdQAQR/wgKbrBQN96tSD+1PKdlLOjmS2TzCbyf+Sat8uQ9t+LVoNWdvm8A04Gpa8MW3Mntymey",
+ "ooDpiIvVhq9gREKOH3cmhiV3HoRwkEP3XvKmU8v+hTa4b5IgU+PMrTlJKeC+OFJBZabnrxdmovdD/zKB",
+ "CSo9whYlikmNYyMxHa47j2yUcW8MtDQBg5atwBHA6GIklmzW3IQkW5iLLJzlSTLA75gAYF/al/PI1SxK",
+ "ONYkdQk8t39OB9qlT/4SMr6ENC+xajkhZYuT8NG7PbUdSqIAVEAJK1o4NQ6E0iYjaDfIwfHTclkKCSxL",
+ "ea1FZtDomvFzgJOP7zNGFng2eYQUGUdg47s4Dsx+VPHZlKtjgJQ+mQIPY+OLevQ3pOO+yI/biTyqcixc",
+ "jLxq5YEDcO/q2NxfPYdbHIYJOWeOzV3x0rE5r/G1gwyyj6DY2ss14j0zPh8TZ/c8gNDFctSa6Cq6yWpi",
+ "mSkAnRbo9kC8UNuMAj+TEu9iu3D0nnRtxzDU1MGkPC/3DFuoLXr74NVCrtQHYBmHI4ARafhbYZBesd/Y",
+ "bU7A7Jt2vzSVokKDJOPNeQ25jIkTU6YekWDGyOWzKHXLjQDoGTvaPMhe+T2opHbFk+Fl3t5q8zYlWYga",
+ "Sh3/sSOU3KUR/A2tME2ylZd9iSVpp+g6rXTzzEQiZIroHZsYPtIMn4IMlIBKQdYRorLL1Mup020Ab5zX",
+ "oVtkvMBsNlzuPo88oTSshLHQGtGDn8SnME9yTKKn1HJ8dbbSS7e+V0o11xQ9I2LHzjI/+grQlXgptLEZ",
+ "vkAkl+AafWtQqf7WNU3LSl1fK0o5K4o0b8BpL2GXFaKs0/Tq5/3+uZv2x4YlmnqB/FZIclhZYIrkpAfm",
+ "nqnJSXfvgl/Qgl/wO1vvtNPgmrqJtSOX7hz/Jueix3n3sYMEAaaIY7hroyjdwyCjyNkhd4zkpuiN/2Sf",
+ "9XVwmIow9kGvnRC/O3ZH0UjJtUQGg72rEPhM5MQSYaMMw8OQ1pEzwKtKFNueLZRGHdWY+VEGj5CXrYcF",
+ "3F0/2AEMRHbPVFSNBtNNwdcK+JQrupMB52QSZi66ifJihhBPJUyodDBEVBN1dwhXF8DL72H3i2uLy5l9",
+ "mM9uZzpN4dqPeADXL5vtTeIZn+bJlNZ5CTkS5byqtLriZeYNzGOkqdWVJ01sHuzRH5nVpc2YF9+cvXjp",
+ "wf8wn+UlcJ01osLoqrBd9W+zKsr2N3JAQiZ1p/MFmZ1EyWjzmxRlsVH6eg0+JXUkjQ5yZ7YPDtFR9Ebq",
+ "ZdpD6KDJ2b+N0BL3vJFA1TyRtOY7eiHpvorwKy7KYDcL0I548+DipiVgTXKFeIBbv65Ej2TZnbKbwelO",
+ "n46Wug7wpHiuPUmzN5QX3jAl+0/o6PO8q/yr+4Zj5kuyigyZk6w3aEnITCnytI1VLowjDklvZ64xw8Yj",
+ "wqgbsRYjT7GyFtFYrtmU3DY9IKM5ksg0yfQ6Le4Wytf8qaX4Zw1MFCCt+6TxVPYOKqZJ8db24XXqZIfh",
+ "XH5gstC3w99GxoizvvZvPARiv4ARv9QNwH3eqMxhoY1Fyv0QPUkc8eAfzzi4Evc81nv68NRMzovr7otb",
+ "XKJnyP8cYVCu9sP1gYLy6tPPjsyRrPcjTLbU6jdI63moHicClkKeW4FeLr9BHOgQV7nosJjGutOWLWpn",
+ "H93uMekmtkJ1nRRGqB53PnqWw4SbwULNJW01BZJ0fN3SBBN7lZ7S+C3BeJgHnrglv17wVDZSJ2Q4mM7a",
+ "B+COLd0qFjoH3Jsm2oJmZ9FbctNWUDB6BbqNJRwmtrmhwEDTThYVWskAqTaWCeb0/lcalRimltdcUhUX",
+ "14+Oku9tgIxfrte10phKwqTN/gXkYsPLtORQ5EMTbyFWggqU1AaiChh+ICr+RFTkq4g0MUQeNedL9mAe",
+ "leHxu1GIK2HEogRs8ZBaLLhBTt4Yopoubnkg7dpg80cTmq9rWWgo7NoQYo1ijVCH6k3zeLUAew0g2QNs",
+ "9/Ar9hk+2xlxBZ87LPr7efb04VdodKU/HqQuAF9gZh83KZCd/M2zkzQd47sljeEYtx/1JBl1TxXmxhnX",
+ "ntNEXaecJWzped3hs7Thkq8g7SmyOQAT9cXdRENaDy+yoPJIxmq1Y8Km5wfLHX8a8T537I/AYLnabITd",
+ "+McdozaOntryFjRpGI5qLfnMxAGu8BHfSKvwRNRTIj+u0ZTut9Sq8SX7R76BLlrnjFP+kFK03gshXzo7",
+ "D+mJMFVzk6GZcOPmcktHMQedGZas0kJaVCxqu8z+zPI11zx37O9kDNxs8eWTRHrqbppUeRzgHx3vGgzo",
+ "qzTq9QjZBxnC92WfSSWzjeMoxedttEd0Kkcfc9PPdmNvh/uHniqUuVGyUXKrO+TGI059K8KTewa8JSk2",
+ "6zmKHo9e2UenzFqnyYPXbod+fvXCSxkbpVM5B9vj7iUODVYLuELfvfQmuTFvuRe6nLQLt4H+0748BJEz",
+ "EsvCWU4qAlebX4JZdtRn34nwv/zgyykOZO8RPwNyJGj6fORYhKRLEklo6MbHcNXs7w//zjQsfYHE+/cR",
+ "6Pv3516Y+/uj7mdiUvfvpzPxJG0a7tcWC0exwn6mAtc3tYdfq4SFIaS9b15DfLxBwsIzxmrdB3eUF36o",
+ "OeumGP/4d+HdeLKlXyvTp+Dt2zf4JeAB/+gj4hMfedzA1h+DVjJCKFGJhSTJFM33yE+Cs6/Vdirh9Dhp",
+ "IJ5/ARQlUVKLsviljd7tsTbNZb5OvnsuXMdf21p7zeLo8CZTQK65lFAmhyOd4degWyS0n3+oqfNshJzY",
+ "tl9Ug5bbW1wLeBfMAFSY0KFX2NJNEGO1GxjZON6XK1UwnKfNN9ge12Exlihl/j9rMDZ1YeEHcv5D+7Zj",
+ "B5SxnYEs0Kpwwr6jctprYJ1kUqjNh2wf3cj3uioVL+aYheTim7MXjGalPlQxijLGr1CZ7a6iZ9eMUqlO",
+ "cyMPxZ/SIS7Tx9nvc+9WbWzWJHhPBRG7Fm0KetF760E1N8bOCXseFcaleGM3BMMkNHrjNPNmNJJxkSbc",
+ "f6zl+RpV9w5rHSf56aUOAlWaqLxoUyasyS+K587B7asdULGDOVN2DfpaGKqiDFfQjVtugvi96SjEMXeX",
+ "p2spiVJOjrjlmmyix6I9AEdXZHgOSkLWQ/yRihtVCjm28sNr7JVMd9YvIzGoK0pRsE35p1AdP+dSSZFj",
+ "srHUFe3LLU95K52Ql61vjA9H3J/QxOFKFq9o3Ck9FkfLWQRG6BE3fKyJvrpNJeqgPy3W9V1zy1Zgjeds",
+ "UMxDDRZvLxbSgM8Xi8W5Iz6pdOf9GTlk0qUha56+jiQjDJ8aMQB867796M1DGFdwKSQqgh5tXvAjiy5W",
+ "g7VOexSWrRQYv55uDLl54/qcYDh1Adt3J6F6LI5Bz7du2eSrMBzqLHgueE8B1/aZa+uTXDU/dzzVadKz",
+ "qvKTjlfoScoDditHEZx4gc7CE2CE3Gb8eLQ95LbX5QjvU0docIUOC1DhPTwgjKZaTa8SmhNaiaKwBSNX",
+ "v2SmCyETYLwQEtraxokLIk9eCbgxeF5H+plcc0si4CSedgG8JIU6wdCM9U9Utx2qn+LLoQTXGOYY38a2",
+ "0M4I42gatIIbl7umpLKj7kiYeIa13D0ih2VzUKryQlSBkSe9QjopxuEYdyjV1b0ARvT8jkxE3THf3bE3",
+ "0Vgw8aIuVmAzXhSp9L1f41eGX1lRo+QAW8jrJs1rVbEcc+d0kwkNqc1PlCtp6s2euUKDW04XVaZKUENc",
+ "HSvsMAYrLXb4byrH6fjOeGedo91Fg2dOcVwGraH7a0rqdTSdGbHKpmMC75Tbo6Od+maE3va/U0ov1aoL",
+ "yKcw241wuXiPUvztG3dxxBk2Bol76WppEmCgc6YK9URRbWxCt7tcCa+yQSZffBRs6hXuN0CMVx6c4+U3",
+ "4qIdG2HpfiXD5Jijdj4aV8Ctj3C0nO1lQaNRY+Tl1TPrDi3sY55d5Nh1d+ZQv9a9CA0ug0OAvg/+yKzi",
+ "wrtQtMxiiFkfuTCMJZni09xucH8RPh5g1GL3/dWY735IqIff+5XJLsGnPag0XAlVB+eE4L0WVEL6tVPn",
+ "q4meSK5/aHjFqT6tOXTUeHvhK0TQMr1O/v0v5OvIQFq9+xcw5Q42fVDzbCjtknmqbcKa5OKTko13bsUp",
+ "ySZTeQ29bNipunagZtyArJ5PEQeGNeDms/PiqAszlRtzRqOkjl26ott46rA2XRgesUoZ0eb4T5V6m+gm",
+ "eoHV2qLUZ8Oxgo/WFeQWCzu0vica4JhEaG6yqHjsHynERtTpxpvWZw7bly5sWM3hwB0/iOiLolIpE/7J",
+ "9ORYZ42HIfJpzGi9Aunrt3ZjdSZHDCyXkFtxdSCC8m9rkFF03jzYZagOexRQKRoPdEzAc7zVsQVoX4Dj",
+ "XniiRJi3BmcsfuoSdvcM61BDMjX/PFy1N8m9ghhA7pA5ElEm5cFDhmTvVCFMQxmIheAxR92hzWI3WtUr",
+ "ige+4VyBJN3F0cYI75kyXVZo0lyu61GR8+hMPRZkOaxKMq5/PMciMKapuBlyt8RaOjsfZri89rlfMN61",
+ "eTsJWWDAhN9CcDvNUopLiOuO4UvVNddFaJE0vQSrTrbnPhpERoaKGn2gl83MovVvHsbCJXKmoRd7Xion",
+ "RmRjoQBdl+LGH+eeIccpSuGPztIOriVoX58R5d9SGcisCv7Q++DYhwryDrsREsxonlICbjR70Ks2PRLm",
+ "a+aYLYh7p7B4gUzDhjvodJTEaHzOfch+Rt9D8FfI13vQwtTQ6+HCEcGzXZgBEmOqXzJ/Wx4OKruJsUlI",
+ "STXATSqjkQTdfQ2ptCrqnC7o+GA0BrnJ+cL2sJKknSYfrrKnI0SRuZewOyUlKFTcCDsYA02SE4EeZcLo",
+ "bfKdmt9MCu7VnYD3KS1X81mlVJmNPHacD9Mw9Sn+UuSXUDB3UwQP0JEqSOwztLE3r9nX611IO1RVIKH4",
+ "/ISxM0k+9+Fhu5sHvDe5vGf3zb/FWYuaMqN5o9rJW5l2XsacZfqW3CwMs5+HGXCs7pZT0SAHkvxsR1JA",
+ "aX6dqAl2MlUrHz419+s0tURFUKRkktf0YvUMD3rKcHSthQXv2ECXuNtI5l+6mClVykkQrqfF7zcOpW5H",
+ "SjVycceTIUAW5JQ4zwYKP3gSAU0NpgOOQo2PUFu+pvUTGopHZamuMzxGWZPELqV0uXbdWyKk7W27OXJb",
+ "QORwxI2XIHZszQuWK60hj3uk43QIqI3SkJUK/Y9ST6NL6wTCDTrnS1aqFVOV0/MpF2R4RErWVormuqs6",
+ "UhRzThBk9OI1ktUDjI8x9+BS4yG8e0o5HV8m6mKdMFzhhoXdOroWlCe4o0u4RGBOIPTDRruzVKmr7rr6",
+ "RdfGSiBatRF5Gt3/Xu46o042KepNocJnUaYoTmyGBzzmKc3rLJ6eIZpB8kWZ5NX++PlXKqRz91+8wvvj",
+ "siV45jLCzxI1m4kNZ/noZdEDACGl0CJba0q9HLPypqCbWlEoIr6x9QGdyHDQleF2sLkR7hKoD/sJJVXx",
+ "LXEQmt3xBelCLPXIoUo6Sez3SaAqoIupnglNpvmJ/DMCYNxXoQPDJI+FY8FYYlXdjCeQfN7oifNO0XPR",
+ "uyRCFlBihjknO9EamBu71uBje6n8Z6/eWMXtOsiNrvnQmiML2ILBwFsqmsQN2R6DDdTXHu0L5KrKSriC",
+ "jguHDziu8xyMEVcQ1y2lzqwAqPBFoK+npnwT4uuwp7z4tWfR6/YU7Ca1GUIs7RQ7oKokFautzOiYmKlH",
+ "yUF0JYqad/BnblHBcax4Y+K+DrC+m8YpjmYS6cXtYxEHvYmQ5pPnUqadieJ498YMibMVzXMFEWF7sk3F",
+ "r+W42j4kylbcnF77NELsN1vI8eruesvcHicMB2Oml8tiVM7UzQ7f1PwzSmX7iGxQCTath0Go5B2nnQq6",
+ "gu+buBrJUC1MYgBhWt6AvrfQ+nZGzTZ8xwqxXIKmpzhjuSy4LuLmQrIctOVCsmu+MzfXyRy0uob5QbXM",
+ "cWocNDCrlIKGVmUCpNx5hX9MZZqg6uC7a0LNoWvbqrEitYNdSQcD8a1TDdErcoQIfCoKVAzpsCqJUjnb",
+ "8Es4ch4jfoP902CCKG+5twpnnTLFh720/hOiDg/8z1LYvdRO8l7fTZXeEYkYAw3KVevMQJszpMGUZ/EF",
+ "lUqLvYv7lUfCXpNRk+aDkUyqXTF9ZBfRrOPd0mOZ3ExXVzuWo5T/MvHwDHm72eOuACaq1ZZ7c/NQLBlc",
+ "CoSUuff+PlJqIXWBF4UYK42/Bp+u3J+t7rSNCdCNM93SHdm70hBVqsryKW9YBZTgWA1pLR7SLowTbGRV",
+ "fuBaSF6SI1ypqyKpJfIHPBYkGqC3T3Mhzvt+aF0hoDl4WHc5rzWKsdd8dzglZisIpF34aeSggwfPpAZq",
+ "v8F0xA2V8klmnDxGQExwnVQ1m2Guv7tfDMWmtK/nv99y/PtYegFn0itKWKNwH721qlQglQStcblLMY3w",
+ "AnSDBY7JhxO8q+9sq5rT8ntsUPKSvFkK6EmgDT1tE9iMarbvd36KM8S3aQs0OWyjs0TQSPv84odWU51W",
+ "PT50OABe7BMX1Y8Pz5MenE8c//9Dg5RoKe/GKKGz/ENudn6BrWofbZGXlq0FqtdBMaPdfYl8KM2zxjVx",
+ "5GoeeDBiOngnnpVlwvORBHgqLh4RjrsX9RUvP773ItYJOEN8QPFq3N8hdn+LkUyoNDcLvn3BJ80dubrd",
+ "3dTyJXpb/g3cHiWvBT+UtxkMmD+qX7ykp6llqDR8BZJd45hksX34JVv4BFOVhlyYvi3iOhQBbLy9sCau",
+ "D3je2gPuZYfW+YuytyDjZTDtsR/bgmL4+rKSLYTtEf3ETGXk5CapPEV9A7JI4C/Fo+JMzweui8tODEcr",
+ "1UU3mtJwx7EcUVTmkbEcwxzWU5dH8Qru0qkNDNc5+bbu4DZxUbdrmxqINDkbFFZ7mhI/lM7c5LpjANOd",
+ "pHA6KoHT7xC6RDjyY/h5UxTzy1gyC0rYMJI3pbcftSiLQ4TRyYLzoamRj3lefvX50j7uXRogIHfq4VH1",
+ "JatvEQNCiEmstTN5NFWU32ZCahvfLZHIBl2V8loLu8M07kHjFb8mg6y+axz2fcBHY0T1d59Vl9AUAmjd",
+ "+2sTbtfvFC/xPiLbrnS3kCpP2DdbvqlKbxNhf7m3+BM8/vOT4sHjh39a/PnBFw9yePLFVw8e8K+e8Idf",
+ "PX4Ij/78xZMH8HD55VeLR8WjJ48WTx49+fKLr/LHTx4unnz51Z/uOT7kQCZAZyFp6Ox/Z2flSmVnL8+z",
+ "CwdsixNeie9hR+XLHRmHwug8x5MIGy7K2dPw0/8fTthJrjbt8OHXmc9JOFtbW5mnp6fX19cncZfTFfrz",
+ "ZlbV+fo0zDOonH728rx5N6dnF9zRxmOKfHE8KZzht1ffvL5gZy/PT1qCmT2dPTh5cPLQja8qkLwSs6ez",
+ "x/gTnp417vupJ7bZ0/cf5rPTNfASw1/cHxuwWuThkwZe7Pz/zTVfrUCf+Grx7qerR6dBrDh97/2aP+z7",
+ "dhoXXjx933H/Lg70xMJsp+9DvvH9rTsJvb3be9RhIhT7mp0uMAXe1KZgosbjS0Flw5y+R3F59PdTn7Mr",
+ "/RHVFjoPpyFGIt2yg6X3dutg7fXIuc3XdXX6Hv+D9BmBRRHyp3YrT/GB4PR9ZzX+82A13d/b7nGLq40q",
+ "IACslkuqn7Dv8+l7+jeaCLYVaOEEP4xK8b9S9OApZjXdDX/eyTz543Adg+LFyceWV5Sui7NSGJsuoTbD",
+ "80pH/bxADmz7UVxUCZEe6PAYP3rwIPAurxlEdHfqj2lUx2iaT3g/dmx4pw2Z176VfZjPnhwJ6F7rTyfi",
+ "PgHM17xgwQ0T53748eY+lxgK5rgyo1sHIXjy8SDolp38HnbsR2XZt6gefZjPvviYO3EunbDGS4Yto6zy",
+ "wyPys7yU6lqGlk5cqTcbrneTj4/lK4NPEVpccS8sRpWIZ+/QQZ5cc7tH7awoBkRPYhsY+7XC+28MYxuz",
+ "qnx+nRZprdQqpFvCUO0doOpiDYkwTAoWCm9EUhUwi+VJq2v4cEue0HtX5NqeJ6w4aI7E2sDLUAciAjUZ",
+ "U9h/IaKRhxrHIRJuy6GYerERJqgLf/CUP3iKpukff7zpX4O+EjmwC9hUSnMtyh37WTbZEW/M486KIhmI",
+ "3T36B3ncfLbNclXACmTmGVi2UMUuVArqTHAJpKAOBJnT991ynyTSzeipOBVk6n5nnK0wy+lwEYsdO38+",
+ "kHCoW5/zfr3DplEZzadv3pOG59SXVgHrgzjgjHEFxz5vepfmmvvI3i1kpWzzYE6L+oMR/cGIbiXcTD48",
+ "U+SbpPZBuYf54M6ehzTCqUID3A5BmaKjfNLjeycbP9R/UvoOBbRDwaIP5KnZR/MfLOIPFnE7FvEdJA4j",
+ "nlrPNBJEd5w+NJVhoMt80S+qj48coXldch056B4yc5zhiN648TG4xsdW6pK4Ip2OSwZbQX4MiQ28Wz3v",
+ "D5b3B8v792F5Z4cZTVcwubVmdAm7Da8afcisa1uo6+idA2EhH6ShHdh9rE3/79NrLmy2VNqnR8Kik8PO",
+ "Fnh56nOh935t048OvmBO1ejHOOgo+etpU9M3+bH/RJL66p8IRhqFuIXwuX0ujZ8fkbU3D49v3jm2jBXj",
+ "PNdvX9Oenp5iypG1MvZ09mH+vvfSFn9815DA++au8KTw4d2H/xcAAP//NCrlReDiAAA=",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/participating/public/routes.go b/daemon/algod/api/server/v2/generated/participating/public/routes.go
index ade11ffe9..1b23d9839 100644
--- a/daemon/algod/api/server/v2/generated/participating/public/routes.go
+++ b/daemon/algod/api/server/v2/generated/participating/public/routes.go
@@ -177,192 +177,201 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+y9fXPcNpIw/lXwm7sq27qhJL8ku1ZV6n6ynWR1sR2XpWR3z/KTxZA9M1iRAAOAo5n4",
- "8Xd/Cg2ABElwhiMp9ubOf9ka4qXRaDT6Dd0fJqkoSsGBazU5+TApqaQFaJD4F01TUXGdsMz8lYFKJSs1",
- "E3xy4r8RpSXji8l0wsyvJdXLyXTCaQFNG9N/OpHwa8UkZJMTLSuYTlS6hIKagfWmNK3rkdbJQiRuiFM7",
- "xNmLycctH2iWSVCqD+WPPN8QxtO8yoBoSbmiqfmkyDXTS6KXTBHXmTBOBAci5kQvW43JnEGeqUO/yF8r",
- "kJtglW7y4SV9bEBMpMihD+dzUcwYBw8V1EDVG0K0IBnMsdGSamJmMLD6hloQBVSmSzIXcgeoFogQXuBV",
- "MTl5N1HAM5C4WymwFf53LgF+g0RTuQA9eT+NLW6uQSaaFZGlnTnsS1BVrhXBtrjGBVsBJ6bXIXlVKU1m",
- "QCgnb797Th4/fvzULKSgWkPmiGxwVc3s4Zps98nJJKMa/Oc+rdF8ISTlWVK3f/vdc5z/3C1wbCuqFMQP",
- "y6n5Qs5eDC3Ad4yQEOMaFrgPLeo3PSKHovl5BnMhYeSe2MZ3uinh/J91V1Kq02UpGNeRfSH4ldjPUR4W",
- "dN/Gw2oAWu1LgylpBn13nDx9/+Hh9OHxx397d5r8t/vzq8cfRy7/eT3uDgxEG6aVlMDTTbKQQPG0LCnv",
- "4+Otowe1FFWekSVd4ebTAlm960tMX8s6VzSvDJ2wVIrTfCEUoY6MMpjTKtfET0wqnhs2ZUZz1E6YIqUU",
- "K5ZBNjXc93rJ0iVJqbJDYDtyzfLc0GClIBuitfjqthymjyFKDFw3wgcu6F8XGc26dmAC1sgNkjQXChIt",
- "dlxP/sahPCPhhdLcVWq/y4pcLIHg5OaDvWwRd9zQdJ5viMZ9zQhVhBJ/NU0Jm5ONqMg1bk7OrrC/W43B",
- "WkEM0nBzWveoObxD6OshI4K8mRA5UI7I8+eujzI+Z4tKgiLXS9BLd+dJUKXgCoiY/RNSbbb9v85/fE2E",
- "JK9AKbqANzS9IsBTkUF2SM7mhAsdkIajJcSh6Tm0DgdX7JL/pxKGJgq1KGl6Fb/Rc1awyKpe0TUrqoLw",
- "qpiBNFvqrxAtiARdST4EkB1xBykWdN2f9EJWPMX9b6ZtyXKG2pgqc7pBhBV0/c3x1IGjCM1zUgLPGF8Q",
- "veaDcpyZezd4iRQVz0aIOdrsaXCxqhJSNmeQkXqULZC4aXbBw/h+8DTCVwCOH2QQnHqWHeBwWEdoxpxu",
- "84WUdAEByRySnxxzw69aXAGvCZ3MNviplLBiolJ1pwEYcertEjgXGpJSwpxFaOzcocMwGNvGceDCyUCp",
- "4JoyDplhzgi00GCZ1SBMwYTb9Z3+LT6jCr5+MnTHN19H7v5cdHd9646P2m1slNgjGbk6zVd3YOOSVav/",
- "CP0wnFuxRWJ/7m0kW1yY22bOcryJ/mn2z6OhUsgEWojwd5NiC051JeHkkh+Yv0hCzjXlGZWZ+aWwP72q",
- "cs3O2cL8lNufXooFS8/ZYgCZNaxRhQu7FfYfM16cHet1VK94KcRVVYYLSluK62xDzl4MbbIdc1/CPK21",
- "3VDxuFh7ZWTfHnpdb+QAkIO4K6lpeAUbCQZams7xn/Uc6YnO5W/mn7LMTW9dzmOoNXTsrmQ0HzizwmlZ",
- "5iylBolv3Wfz1TABsIoEbVoc4YV68iEAsZSiBKmZHZSWZZKLlOaJ0lTjSP8uYT45mfzbUWN/ObLd1VEw",
- "+UvT6xw7GZHVikEJLcs9xnhjRB+1hVkYBo2fkE1YtodCE+N2Ew0pMcOCc1hRrg8blaXFD+oD/M7N1ODb",
- "SjsW3x0VbBDhxDacgbISsG14T5EA9QTRShCtKJAucjGrf7h/WpYNBvH7aVlafKD0CAwFM1gzpdUDXD5t",
- "TlI4z9mLQ/J9ODaK4oLnG3M5WFHD3A1zd2u5W6y2Lbk1NCPeUwS3U8hDszUeDUbMvwuKQ7ViKXIj9eyk",
- "FdP4L65tSGbm91Gd/xgkFuJ2mLhQ0XKYszoO/hIoN/c7lNMnHGfuOSSn3b43IxszSpxgbkQrW/fTjrsF",
- "jzUKryUtLYDui71LGUclzTaysN6Sm45kdFGYgzMc0BpCdeOztvM8RCFBUujA8CwX6dVfqFrewZmf+bH6",
- "xw+nIUugGUiypGp5OIlJGeHxakYbc8RMQ1TwySyY6rBe4l0tb8fSMqppsDQHb1wssajHfsj0QEZ0lx/x",
- "PzQn5rM524b122EPyQUyMGWPs3MyZEbbtwqCnck0QCuEIIVV8InRuveC8nkzeXyfRu3Rt9am4HbILQJ3",
- "SKzv/Bg8E+sYDM/EuncExBrUXdCHGQfFSA2FGgHfCweZwP136KNS0k0fyTj2GCSbBRrRVeFp4OGNb2Zp",
- "jLOnMyFvxn06bIWTxuRMqBk1YL7TDpKwaVUmjhQjZivboDNQ4+XbzjS6w8cw1sLCuaa/AxaUGfUusNAe",
- "6K6xIIqS5XAHpL+MMv0ZVfD4ETn/y+lXDx/98uirrw1JllIsJC3IbKNBkftONyNKb3J40F8ZakdVruOj",
- "f/3EGyrb48bGUaKSKRS07A9lDaBWBLLNiGnXx1obzbjqGsAxh/MCDCe3aCfWtm9Ae8GUkbCK2Z1sxhDC",
- "smaWjDhIMthJTPsur5lmEy5RbmR1F6osSClkxL6GR0yLVOTJCqRiIuJNeeNaENfCi7dl93cLLbmmipi5",
- "0fRbcRQoIpSl13w837dDX6x5g5utnN+uN7I6N++YfWkj31sSFSlBJnrNSQazatHShOZSFISSDDviHf09",
- "aBQFLlgB55oW5Y/z+d2oigIHiqhsrABlZiK2hZHrFaSC20iIHdqZG3UMerqI8SY6PQyAw8j5hqdoZ7yL",
- "YzusuBaMo9NDbXgaaLEGxhyyRYssb6+tDqHDTnVPRcAx6HiJn9HQ8QJyTb8T8qKxBH4vRVXeuZDXnXPs",
- "cqhbjDOlZKav16EZX+Tt6JuFgf0wtsbPsqDn/vi6NSD0SJEv2WKpA7XijRRifvcwxmaJAYofrFKWmz59",
- "1ey1yAwz0ZW6AxGsGazhcIZuQ75GZ6LShBIuMsDNr1RcOBuI10BHMfq3dSjv6aXVs2ZgqCullVltVRL0",
- "3vbui6ZjQlN7QhNEjRrwXdVOR9vKTmdjAXIJNNuQGQAnYuYcRM51hYuk6HrWXrxxomGEX7TgKqVIQSnI",
- "EmeY2gmab2evDr0FTwg4AlzPQpQgcypvDezVaiecV7BJMFBCkfs//KwefAZ4tdA034FYbBNDb63mOy9g",
- "H+px028juO7kIdlRCcTfK0QLlGZz0DCEwr1wMrh/XYh6u3h7tKxAoj/ud6V4P8ntCKgG9Xem99tCW5UD",
- "4X9OvTUSntkwTrnwglVssJwqnexiy6ZRSwc3Kwg4YYwT48ADgtdLqrT1ITOeoenLXic4jxXCzBTDAA+q",
- "IWbkn70G0h87NfcgV5Wq1RFVlaWQGrLYGjist8z1Gtb1XGIejF3rPFqQSsGukYewFIzvkGVXYhFEde1q",
- "cUEW/cWhQ8Lc85soKltANIjYBsi5bxVgNwyBGgCEqQbRlnCY6lBOHXc1nSgtytJwC51UvO43hKZz2/pU",
- "/9S07RMX1c29nQlQGHnl2jvIry1mbfDbkiri4CAFvTKyB5pBrLO7D7M5jIliPIVkG+WjimdahUdg5yGt",
- "yoWkGSQZ5HTTH/Qn+5nYz9sGwB1v1F2hIbFRTPFNbyjZB41sGVrgeComPBL8QlJzBI0q0BCI671j5Axw",
- "7BhzcnR0rx4K54pukR8Pl223OjIi3oYroc2OO3pAkB1HHwPwAB7qoW+OCuycNLpnd4q/g3IT1HLE/pNs",
- "QA0toRl/rwUM2FBdgHhwXjrsvcOBo2xzkI3t4CNDR3bAoPuGSs1SVqKu8wNs7lz1604QdTOSDDRlOWQk",
- "+GDVwDLsT2z8TXfMm6mCo2xvffB7xrfIcnKmUORpA38FG9S539jAzsDUcRe6bGRUcz9RThBQHy5mRPCw",
- "CaxpqvONEdT0EjbkGiQQVc0KprUN2G6rulqUSThA1K+xZUbnxLNBkX4HxngVz3GoYHn9rZhOrE6wHb6L",
- "jmLQQofTBUoh8hEWsh4yohCMivcgpTC7zlzsuI8e9pTUAtIxbfTg1tf/PdVCM66A/F1UJKUcVa5KQy3T",
- "CImCAgqQZgYjgtVzusiOBkOQQwFWk8QvBwfdhR8cuD1niszh2j+4MA276Dg4QDvOG6F063DdgT3UHLez",
- "yPWBDh9z8TktpMtTdkcWuJHH7OSbzuC1l8icKaUc4Zrl35oBdE7meszaQxoZF1WB447y5QRDx9aN+37O",
- "iiqn+i68VrCieSJWICXLYCcndxMzwb9d0fzHutsOna6JAmNFARmjGvINKSWkYKPzjaim6rEPiY3bS5eU",
- "L1BCl6JauMAxOw5y2EpZW4iseG+IqBSj1zxBq3KM47pgYf9Aw8gvQI0O1TVJW43hmtbzuTc5Y65Cv3MR",
- "E33UKzWdDKqYBqmrRsW0yGm/MhnBfVsCVoCfZuKRvgtEnRE2+vgKt8VQr9nc38dG3gwdg7I/cRDK1nwc",
- "imYz+m2+uQMpww5EJJQSFN4JoV1I2a9iHr4oc5eG2igNRd90brv+MnD83g4qaILnjENSCA6b6CNqxuEV",
- "foweJ7yXBjqjhDDUtyv0t+DvgNWeZww13ha/uNvdE9p1EanvhLwrH6QdcLQ8PcLlt9O/7aa8qWOS5nnE",
- "l+fem3QZgJrW79uZJFQpkTIUks4yNbUHzbn/3OOUNvrf1FG0d3D2uuN2nFbhU0Y0ykJeEkrSnKHJVnCl",
- "ZZXqS07RKBQsNRJt5LXfYTPhc98kbpeMmA3dUJecYqRZbSqKRkjMIWIX+Q7AWwtVtViA0h3lYg5wyV0r",
- "xknFmca5CnNcEnteSpAY8nNoWxZ0Q+aGJrQgv4EUZFbptriNz6mUZnnuPGhmGiLml5xqkgNVmrxi/GKN",
- "w3kvuz+yHPS1kFc1FuK3+wI4KKaSeFTU9/YrBqy65S9d8Co+f7efrc/FjN+8udqgzah50v1/7v/nybvT",
- "5L9p8ttx8vQ/jt5/ePLxwUHvx0cfv/nm/7Z/evzxmwf/+e+xnfKwxx77OMjPXjhV9OwF6huN06UH+ycz",
- "uBeMJ1EiC8MnOrRF7uPDVkdAD9rWKL2ES67X3BDSiuYsM7zlJuTQvWF6Z9Gejg7VtDaiY33ya91Tir8F",
- "lyERJtNhjTeWovqBhPFndegFdC/l8LzMK2630kvf9tWID+gS82n9dNJmVTkh+K5uSX00ovvz0VdfT6bN",
- "e7j6+2Q6cV/fRyiZZevYq8cM1jHlzB0QPBj3FCnpRoGOcw+EPRq7ZoMpwmELMFq9WrLy03MKpdkszuF8",
- "LL4z8qz5GbdB8ub8oE9x41wVYv7p4dYSIINSL2PZFlqCGrZqdhOgE+dRSrECPiXsEA67RpbM6Isuii4H",
- "OsdX/6h9ijHaUH0OLKF5qgiwHi5klCUjRj8o8jhu/XE6cZe/unN1yA0cg6s7Z+1A9H9rQe59/+0FOXIM",
- "U92zD3Dt0MGTyYgq7V4FtSKADDezOWaskHfJL/kLmDPOzPeTS55RTY9mVLFUHVUK5DOaU57C4UKQE//Q",
- "6AXV9JL3JK3BNFDBEy9SVrOcpeQqVEga8rSpPfojXF6+o/lCXF6+7wVD9NUHN1WUv9gJEiMIi0onLjFB",
- "IuGaypizSdUP03Fkm3lk26xWyBaVtUj6xAdu/DjPo2Wpug9U+8svy9wsPyBD5Z5fmi0jSgvpZREjoFho",
- "cH9fC3cxSHrt7SqVAkX+UdDyHeP6PUkuq+Pjx0BaLzb/4a58Q5ObEkZbVwYf0HaNKrhwq1bCWkualHQR",
- "82ldXr7TQEvcfZSXC7Rx5DnBbq2Xoj4SHodqFuDxMbwBFo69X73h4s5tL5+EKr4E/IRbiG2MuNF42m+6",
- "X8Hb0RtvV+f9aW+XKr1MzNmOrkoZEvc7U+emWRghy4c/KLZAbdWl8ZkBSZeQXrn8KlCUejNtdfcRNk7Q",
- "9KyDKZt5x778wtwP6BGYAanKjDpRnPJN9xG+Aq19HO9buILNhWhSR+zz6r79CFwNHVSk1EC6NMQaHls3",
- "RnfzXRgXKvZl6d9S46M6TxYnNV34PsMH2Yq8d3CIY0TReqQ8hAgqI4iwxD+Aghss1Ix3K9KPLc9oGTN7",
- "80Wy8HjeT1yTRnlyEVfhatDqbr8XgGm8xLUiM2rkduEyUNmHzgEXqxRdwICEHDplRj4nbjlycJBd9170",
- "phPz7oXWu2+iINvGiVlzlFLAfDGkgspMJ87Oz2T9fs4zgYklHcJmOYpJdUCiZTpUtpxjNlPeEGhxAgbJ",
- "G4HDg9HGSCjZLKnyybEwh5g/y6NkgN/x4f62dC1nQYhYkCisTsbieW73nPa0S5e0xWdq8elZQtVyRKoV",
- "I+FjVHpsOwRHASiDHBZ24baxJ5QmiUCzQQaOH+fznHEgSSzaLDCDBteMmwOMfHxAiLXAk9EjxMg4ABv9",
- "2TgweS3Cs8kX+wDJXRIE6sdGT3jwN8Tfa9n4ayPyiNKwcDbg1Uo9B6AuRLG+vzqBsjgMYXxKDJtb0dyw",
- "OafxNYP0soag2NrJEeIiKh4MibNbHCD2YtlrTfYquslqQpnJAx0X6LZAPBPrxD7YjEq8s/XM0Hs0JB2f",
- "j8YOps3Pck+RmVhjlA5eLTYEegcsw3B4MAINf80U0iv2G7rNLTDbpt0uTcWoUCHJOHNeTS5D4sSYqQck",
- "mCFyuR+kXLkRAB1jR5O/2Cm/O5XUtnjSv8ybW23apBLzr31ix3/oCEV3aQB/fStMnSTlTVdiidop2sEm",
- "7fwwgQgZI3rDJvpOmr4rSEEOqBQkLSEquYp5To1uA3jjnPtugfECs9BQvnkQRDBJWDCloTGi+ziJz2Ge",
- "pJj8Toj58Op0KedmfW+FqK8p60bEjq1lfvIVYAjwnEmlE/RARJdgGn2nUKn+zjSNy0rtGCmbKpZlcd6A",
- "017BJslYXsXp1c37wwsz7euaJapqhvyWcRuwMsPUxtHIyS1T2+DarQt+aRf8kt7ZesedBtPUTCwNubTn",
- "+IOciw7n3cYOIgQYI47+rg2idAuDDF689rljIDcFPv7DbdbX3mHK/Ng7o3b8u9uhO8qOFF1LYDDYugqG",
- "biIjljAdZAbuP0UdOAO0LFm27thC7aiDGjPdy+Dh86l1sIC76wbbgYHA7hl7DSNBtVPnNQK+zfHcylxz",
- "OAozF+0EdyFDCKdiylco6COqfi23C1cXQPMfYPOzaYvLmXycTm5nOo3h2o24A9dv6u2N4hld89aU1vKE",
- "7IlyWpZSrGieOAPzEGlKsXKkic29PfoTs7q4GfPi29OXbxz4H6eTNAcqk1pUGFwVtiv/MKuyWfoGDojP",
- "gG50Pi+zW1Ey2Pw6tVholL5egkslHUijvZyXjcMhOIrOSD2PRwjtNDk734hd4hYfCZS1i6Qx31kPSdsr",
- "QleU5d5u5qEdiObBxY1LnBrlCuEAt/auBE6y5E7ZTe90x09HQ107eFI415Zk14XN566I4F0XOsY8b0rn",
- "dS8oZqy0VpE+c+JVgZaEROUsjdtY+UwZ4uDWd2YaE2w8IIyaESs24IrlFQvGMs3G5KTpABnMEUWmiqbF",
- "aXA3E65WT8XZrxUQlgHX5pPEU9k5qJjexFnb+9epkR36c7mBrYW+Gf42MkaYrbV74yEQ2wWM0FPXA/dF",
- "rTL7hdYWKfND4JLYw+Efzti7Erc46x19OGq2wYvLtsctLK3T53+GMGyO9d11fbzy6tLGDswRrdPDVDKX",
- "4jeI63moHkceGvn8tAyjXH6D8KFDWJ2ixWJq605TbqiZfXC7h6Sb0ArVDlIYoHrc+cAth4kyvYWacrvV",
- "tmxGK9YtTjBhVOmRHb8hGAdzLxI3p9czGssiaoQMA9Np4wBu2dK1IL6zx72qX1vY2UngS67bMvuIvATZ",
- "vAHsJ6S5ocBgpx0tKjSSAVJtKBNMrf8vVyIyTMWvKbfVV0w/e5RcbwXW+GV6XQuJKSBU3OyfQcoKmscl",
- "hyztm3gztmC2sEilIKhc4QayRZssFbnqH/UbIoeaszk5ngblc9xuZGzFFJvlgC0e2hYzqpCT14aouotZ",
- "HnC9VNj80Yjmy4pnEjK9VBaxSpBaqEP1pnZezUBfA3ByjO0ePiX30W2n2AoeGCy6+3ly8vApGl3tH8ex",
- "C8AVhtnGTTJkJ3917CROx+i3tGMYxu1GPYy+lreV4YYZ15bTZLuOOUvY0vG63WepoJwuIB4pUuyAyfbF",
- "3URDWgcvPLNljZSWYkOYjs8Pmhr+NBB9btifBYOkoiiYLpxzR4nC0FNTlsJO6oezNZJcRmEPl/+IPtLS",
- "u4g6SuSnNZra+y22avRkv6YFtNE6JdTm/chZE73g85yTM59WCFMs15mVLW7MXGbpKOZgMMOclJJxjYpF",
- "pefJn0m6pJKmhv0dDoGbzL5+Ekkr3U5vyvcD/JPjXYICuYqjXg6QvZchXF9ynwueFIajZA+a1x7BqRx0",
- "5sbddkO+w+1DjxXKzCjJILlVLXKjAae+FeHxLQPekhTr9exFj3uv7JNTZiXj5EErs0M/vX3ppIxCyFiu",
- "wOa4O4lDgpYMVhi7F98kM+Yt90Lmo3bhNtB/Xs+DFzkDscyf5Zgi8ExEtFOf6ry2pLtY9Yh1YOiYmg+G",
- "DGZuqClpp5X+9Hz0bqKg4p4ub9juO7bMF48H/KOLiM9MLriBjS/frmSAUIK0+lGSyervgY+dkmdiPZZw",
- "OqfQE8+/AIqiKKlYnv3cvPzsVC2QlKfLqM9sZjr+0tRXqxdn78Bo2r8l5Rzy6HBW3vzFy6URyfmfYuw8",
- "BeMj23YLKdjldhbXAN4G0wPlJzToZTo3E4RYbT+qq4O284XICM7T5Jhrjmu/AEeQJv3XCpSOPVDCDzZw",
- "DG2jhh3YLN0EeIYa6SH53pZQXgJpJRBCTdBnimi/mq7KXNBsihksLr49fUnsrLaPrRJks4QvUBFqr6Jj",
- "EwvSZ44LQfYFf+LPI8aPsz1e26xa6aRO6h17gGpaNGnHWcdPgCpSiJ1D8iIohmrfqpohDD3MmSyMVleP",
- "ZuUjpAnzH61pukS1r8Vah0l+fHp7T5UqKClZl4aqc0riuTNwuwz3NsH9lAijm18zZSvnwgrab17rB+DO",
- "7ODfwLaXJyvOLaUc7nHL1Rkk90W7B85ekd6VEIWsg/g9hX5bHWLfbP/n2Cua4qpbOqBXS9K+oKxL/viK",
- "6CnlgrMUE0zFrmhXYneMn21ELq6uIdcfcXdCI4crWrCgDsVzWBwsYeAZoUNc39AffDWbaqnD/qmxluuS",
- "arIArRxng2zq6244WyPjClyOUCzIHPBJIVu+S+SQUXd4UrtN9iQjfHozoDx+Z769dqYFjEm/YhyVCIc2",
- "J/hZayBWANVG82CaLAQot572+2P1zvQ5xKe4GazfH/qKoTiGdf2ZZVs/d3+oU+/1dl5m0/a5aesSJNU/",
- "t6Kc7aSnZekmHa7KEpUH9JoPIjjivUy8+yhAbj1+ONoWctsaroL3qSE0WKGzG0q8h3uEUVco6VS/MkKr",
- "pShsQWyYWDRLAuMRMF4yDk0928gFkUavBNwYPK8D/VQqqbYi4CiedgE0Rw93jKEp7dwbtx2qmx7KoATX",
- "6OcY3samuMoA46gbNIIb5Zu6jK6h7kCYeI71ux0i+6VSUKpyQlSGrxY6xVNijMMwbl+eqX0B9I9BXyay",
- "3bWk9uTscxMNPUSdVdkCdEKzLJay9Rl+JfiVZBVKDrCGtKpTe5YlSTHvSjsRTZ/a3ESp4KoqtszlG9xy",
- "uqAaUYQawopIfofxoctsg//G8loO74wL9Ng71NBHdWT7ZV/qh07GpF5D04lii2Q8JvBOuT06mqlvRuhN",
- "/zul9Fws2oB84vQT27hcuEcx/vatuTjC7Ay9ZK32aqmTJ2Bgn/A1JFFtrJ/9trkSXmW97K3oUKpr1G03",
- "QAxXm5vi5TcQ3hsk3aD2frUeyqEg33QwJp1q9zpOU7KVBQ2+OLIRQvZtEUIRt84ORQXZoCDzudd7nGTY",
- "k7N1PPFhgFAfbtYH6Acfy0pKypz7vWEWfcy6qPf+O4Qx8bDNBncX4WLJBy12P6yG4r59Mjb83q1GdQXu",
- "yXwpYcVE5R3bPvLJq4T211ZtpzryPrr+vuEVp/q85tBB4+2Fqwpgl+l08h9+tnFyBLiWm38BU25v03t1",
- "rvrSrjVPNU1InVB6VILp1q04JlFhLCeekw1blbZ21AnrkdWLMeJAv+7XdHKW7XVhxvIqTuwosWMXr+I1",
- "nHaqSTWFR6wUijV53WPlvUaGGF5gha4gbVZ/LB/fs4JUYzL/Jm5BAuyTRMtMFhQM/ZJ+akCdriMxXdap",
- "bamm+hn8d9zxvddgwYtGm/38cHxipdM6Og35NGZDXgB3NTvb7zxGR5vP55Bqttrx+u6vS+DBy66pt8vY",
- "2tvBYzxWRy9j8pb9rY4NQNsex22FJ0iieGtwht7eXMHmniItaoimY5/6q/YmeTsQA8gdEkMiQsWiP6wh",
- "2TnkmaopA7Hgo61sd2gyoA1Wcgrekt5wLk+S5uJo3pdumTJeSmbUXKbrXq+uMRB36IFevxLFsP7xAgt/",
- "qLrKos/7EWrp5KyfHfHa5Q3Bt5K178RnEAHlf/MPo+0sObuCsNYUeqquqcx8i6jpxVt1ki33Ue9Vna+i",
- "0AV6Xs/MmtjY/juqSL4tjIBOc2HEiGQojLwdjlrHctxTNujGpn/HQFsD1xykq8mH8m8uFCRa+FjabXBs",
- "Q4WNLLoREtRgjksL3GDmmbdNah3M9Usx0wx1AUXhAomEghroZJAAZ3jObch+br/7h0M+1+tOC1NNr7uL",
- "DvioaKZ6SAypfk7cbbn7QdJNjE2Mc1v3WcWy4XCQbW9IKUVWpfaCDg9GbZAbnWtqCyuJ2mnS/io7OkLw",
- "qvMKNkdWCfLVGvwOhkBbycmCHmRR6GzynZrfVAzuxZ2A9zktV9NJKUSeDDg7zvopfLoUf8XSK8iIuSl8",
- "9OBA5RtyH23stTf7ernxKWvKEjhkDw4JOeU2Xts7tts5pDuT83t62/xrnDWrbFYtZ1Q7vOTxwFfMdyVv",
- "yc38MNt5mALD6m45lR1kR4KY9UD6IEmvI3WgDsdq5X1Xc7c2T0NUFoqYTNKUndkRJ1OHyDSVP5owmb50",
- "kOfiOkEqSur8XzGdw7RrM0mf8bTpZrA9gyDehip3gW7IkmYkFVJCGvaIP3GwQBVCQpILDL+JeQbn2shD",
- "BcY1c5KLBRGlUXNtGj3vQ4mWpQnmss9sbc/EOmoGEhmAcs9q3TS2cX+eLdVr9q+Mc7GM2FsQ0R7Le5e/",
- "cYSyd9WKAMwRBLrb1nQaq+7TXle3PtRQtTYtCpbG0f3HijIZjA3ZUbsosr6aHF1pJf8qcABXUZftdg+p",
- "rUM3G+snrXMmjzwWAQDDntMWDKP8p/uCMce6jgmNIPmsllqnrbK7rHP2fT47S+MptVrrEogZu5LgXqnZ",
- "AnSdyjkl1Ut/i5nmfd3S6Cmg8AmZLf9BlbWEeIuMq37XFQ9EmeSwgpZD2T2dq9IUlGIrCCvn2c4kAyjR",
- "PtmVmmOe0pDLdUQpt/Yk8LWNwW5UtrKItTtFdghOUTFvzRN7TNTYo2QgWrGsoi38qVvUIhsqQxZhwx7W",
- "kZxibyYRX9w2FrEztgFpPnoueTy0IXy5WRtFcLasNp5aImxOtirpNR9WIiJ2p9rffvt1EByMqM5L6sEr",
- "X9a7clMFcpAythFGr35gVOZQ4Ou/hklPvLjl+kZkLGvqYioyAFPNecboPWiiw4JmBd2QjM3nIK0xX2nK",
- "MyqzsDnjJAWpKTOazUbdXKw10MoKpjslW8NdcVDPYGIyLtqlLCD5xqkMt5A60XMTkTjtVavFUInE3q7E",
- "nxPQtZGuMa5qgAjcQ2iUre0BExwFJFLQK9hzHsV+g+3TYHoSZ/vTAmcdM0XM13rD3GqjWHc/DCFyuwXF",
- "ELd7hsLUi82bLmmjWdCS7C/ILo2/ai7OcWUZfYcd4IUOw6Awo7fdOHA+8+OoVzVSgqW8H6KE1vJ3+SDd",
- "AhtJI9gixwi0BpsI1wbUt/clcDCr57XfdqiGaNe9i3kWBbdF/npuYcubbNW+gHDMWZArmn961y4m4DxF",
- "fED2dtgYHPoGQyRbVKqbvUx4SUfNHfgB725q/gZd0X8Fs0dRrdQN5USYWqz3wTx4s9DcGi7mvoTXCji5",
- "xjFtHNvDr8nMvdwuJaRMdUWja19do3aFYbEp9xpkrXf43nat82ehb0HGc69pkNdNpn7U8Re8gbA5op+Z",
- "qQyc3CiVx6ivRxYR/MV4VJhCbcd1cdUKcLOVTzovN4SEOw50C0LW9wx06yeHG7s8G8xlLp1KQX+do2/r",
- "Fm4jF3WztrFRmn3kbkvnPia4Ml6lwXTH6E6LECxxQhBU8o+H/yAS5ljDUJCDA5zg4GDqmv7jUfuzOc4H",
- "B1Hp7JPFdVocuTHcvDGK+XnopZ99zTbwqLSzHxXLs12E0Xoi3FQBxUewv7hEBJ+lDukvNtakf1RdLbhb",
- "BMhZxETW2po8mCp4/Dvi3a/rFnnli36ctJJMbzA/orcfsF+iEajf19FMLhqu1g/d3afFFdQZNpvYp0r5",
- "2/V7QXO8j6zays0tJPJD8u2aFmUO7qB8c2/2J3j85yfZ8eOHf5r9+fir4xSefPX0+Jg+fUIfPn38EB79",
- "+asnx/Bw/vXT2aPs0ZNHsyePnnz91dP08ZOHsydfP/3TPcOHDMgW0InPxjP5GxbrTU7fnCUXBtgGJ7Rk",
- "P8DG1gU0ZOwrDtIUTyIUlOWTE//T/+9P2GEqimZ4/+vEJfuYLLUu1cnR0fX19WHY5WiBwQ6JFlW6PPLz",
- "9EoSnr45q71E1gqEO2rfyXrrnieFU/z29tvzC3L65uwwqFd/Mjk+PD58iOXNS+C0ZJOTyWP8CU/PEvf9",
- "yBHb5OTDx+nkaAk0x9hA80cBWrLUf5JAs437v7qmiwXIQ1eG0fy0enTkxYqjDy7o4+O2b0dhRZOjD63Y",
- "mGxHT6x4cPTBJ/Lb3rqVKc/FBJmlRxX970G7MFClw3JLLaPEbOPDWqZECel85aVkwpwqLOWdQSqB4hkQ",
- "Eh82a1nx1Jop7BTA8b+vTv+GpppXp38j35DjqXvvrlDtiE1vPcE1OZxlFuy+dUo925zWUVZBmu+TdzHH",
- "Taw8JB4nQysBtdcjNtwM7TZhUd2aNxt+e5w8ff/hqz9/jMl8/bLkHklB4FGIei18sjtEWkHX3wyhbO3s",
- "4GbcXyuQm2YRBV1PQoD7doxINPacLdD+5NNRtsqEuiqBTJH/Ov/xNRGSOB33DU2vaueVARlzuEmxYvgq",
- "OAuekpueQxC76y8E2td1cl6wQi3K9sPEGs3vMUEWAoqH/tHx8S1Kx0cIzVXktl4TV76q7aJXBNY01fmG",
- "UBVY6FQ1a5LZdVyMokxa5vyou3F4Rl8aJuYL2TdKIPJyHkuwbIfvopP4q4UO5/nCUlS7oyV7yIhC8D52",
- "2Ydb62nky+7+z9jdvuxASmHONEOfeXPl+OusBWRTIMSBOxAAdUj+LiqU8GwJQIhl5MUZ0M/j53TxmsGz",
- "gMaBiV8ODroLPzhwe84UmcM1MlnKsWEXHQcHWDP6yZ6sbKs1ufW8cdTZ2We43ma9ous6ESolXPCEY4W6",
- "FZBALXxy/PAPu8Izjo8FjGhKrOj9cTr56g+8ZWfcCDY0J9jSrubxH3Y15yBXLAVyAUUpJJUs35CfeJ39",
- "Jsiq22d/P/ErLq65R4TRKquioHLjhGha85yKB/mItvKfXuRlI2gjF6ULhUF6KKJOWpVY+WLy/qPXAUYq",
- "FtuaHc0w5d/YpqCCxsPaCfoP1NEHtIAP/n7kcpTFP6Inwqq4R/5NSLxlS/H5oNcG1k6PlOp0WZVHH/A/",
- "qHIGYNmMAEd6zY/QnXn0obUa97m3mvbvTfewxaoQGXiAxXxuaw1s+3z0wf4bTATrEiQzdwq+wnG/2teS",
- "R5gBdNP/ecPT6I/9dXQL/cZ+PvrQLjTVQpBaVjoT10Ff9ABY91V/vrr0auvvo2vKtJFf3LMjTATe76yB",
- "5kcux1Dn1+ZZf+8L5ioIfuxIPKWw4a1tZfMtvb5ohaJIG2f4TKCBYIgXrpMZ48ggQgbW2PXsx7720mNb",
- "F0uwDmrvGo2Ih1qQmRQ0S6nC/NIuG1dPbf14S9WoGxZ5FnF8IZhoCei/YDFH/XCnNwTHHSP/BfsSlGVA",
- "OVxZe+DvLDP1IHpGM+LjoRPyiuZmwyEjp04yb2Hj95Z3Pr+A8pklik8mAjzzh08RilH7Ld1NxuONg7R5",
- "Y+57o+AZBrAAnjgWlMxEtvFVRiS91msb499lbkd1uZjoxzswEv5rWwZ3GQS/2OG+2OG+WGq+2OG+7O4X",
- "O9xIO9wXK9UXK9X/SivVPqapmJjpTDPD0iamU6etea1uR5u0FTWLbz8hYrqWyfrVOZg+JOQCkwJQc0vA",
- "CiTNsYKZCrJ8FBgCiQ+RIDu55EkLEhtoaCa+3/zXRnheVsfHj4EcP+j2UZrlecib+31R3sVPNqXgN+Ry",
- "cjnpjSShECvI7POE8Nm07bVz2P+vHvfHXr4FfP6wpCuo3ysRVc3nLGUW5bngC0IXoolONnybcIFfQBrg",
- "bNYqwvTUZYZjilybxbuk9u3X3W3JvS8BnDVbuNOj3yGXuDPfEN6envz/GOPG/18tpd/iCdGtGOnWsXtc",
- "9QtX+RRc5bPzlT+6jzQwH/6PFDOfHD/5wy4oNDa/Fpp8h5H3txPH6kIhseRdNxW0/AtBb+5ronfDaFi8",
- "Res42HfvzUWAlQHdBdsEd54cHWF6oKVQ+mhirr924Gf48X0Nsy/nNCklW2F26Pcf/18AAAD//wqEN9SA",
- "5AAA",
+ "H4sIAAAAAAAC/+y9fXPcNpIw/lXwm7sqx76hJL8kt1ZV6n6yneR0sR2XpWR3z/KTxZA9M1iRABcA5yV+",
+ "/N2fQgMgQRKc4UiKvbnzX7aGeGk0Go3uRr98mKSiKAUHrtXk9MOkpJIWoEHiXzRNRcV1wjLzVwYqlazU",
+ "TPDJqf9GlJaMLybTCTO/llQvJ9MJpwU0bUz/6UTCPyomIZucalnBdKLSJRTUDKy3pWldj7RJFiJxQ5zZ",
+ "Ic5fTD7u+ECzTIJSfSh/4vmWMJ7mVQZES8oVTc0nRdZML4leMkVcZ8I4ERyImBO9bDUmcwZ5po78Iv9R",
+ "gdwGq3STDy/pYwNiIkUOfTifi2LGOHiooAaq3hCiBclgjo2WVBMzg4HVN9SCKKAyXZK5kHtAtUCE8AKv",
+ "isnpu4kCnoHE3UqBrfC/cwnwGySaygXoyftpbHFzDTLRrIgs7dxhX4Kqcq0ItsU1LtgKODG9jsirSmky",
+ "A0I5efv9c/L48eOnZiEF1RoyR2SDq2pmD9dku09OJxnV4D/3aY3mCyEpz5K6/dvvn+P8F26BY1tRpSB+",
+ "WM7MF3L+YmgBvmOEhBjXsMB9aFG/6RE5FM3PM5gLCSP3xDa+000J5/+su5JSnS5LwbiO7AvBr8R+jvKw",
+ "oPsuHlYD0GpfGkxJM+i7k+Tp+w8Ppw9PPv7Lu7Pkv92fXz/+OHL5z+tx92Ag2jCtpASebpOFBIqnZUl5",
+ "Hx9vHT2opajyjCzpCjefFsjqXV9i+lrWuaJ5ZeiEpVKc5QuhCHVklMGcVrkmfmJS8dywKTOao3bCFCml",
+ "WLEMsqnhvuslS5ckpcoOge3ImuW5ocFKQTZEa/HV7ThMH0OUGLhuhA9c0D8vMpp17cEEbJAbJGkuFCRa",
+ "7Lme/I1DeUbCC6W5q9RhlxW5XALByc0He9ki7rih6TzfEo37mhGqCCX+apoSNidbUZE1bk7OrrG/W43B",
+ "WkEM0nBzWveoObxD6OshI4K8mRA5UI7I8+eujzI+Z4tKgiLrJeilu/MkqFJwBUTM/g6pNtv+Xxc/vSZC",
+ "klegFF3AG5peE+CpyCA7IudzwoUOSMPREuLQ9Bxah4Mrdsn/XQlDE4ValDS9jt/oOStYZFWv6IYVVUF4",
+ "VcxAmi31V4gWRIKuJB8CyI64hxQLuulPeikrnuL+N9O2ZDlDbUyVOd0iwgq6+fZk6sBRhOY5KYFnjC+I",
+ "3vBBOc7MvR+8RIqKZyPEHG32NLhYVQkpmzPISD3KDkjcNPvgYfwweBrhKwDHDzIITj3LHnA4bCI0Y063",
+ "+UJKuoCAZI7Iz4654VctroHXhE5mW/xUSlgxUam60wCMOPVuCZwLDUkpYc4iNHbh0GEYjG3jOHDhZKBU",
+ "cE0Zh8wwZwRaaLDMahCmYMLd+k7/Fp9RBd88Gbrjm68jd38uuru+c8dH7TY2SuyRjFyd5qs7sHHJqtV/",
+ "hH4Yzq3YIrE/9zaSLS7NbTNnOd5Efzf759FQKWQCLUT4u0mxBae6knB6xR+Yv0hCLjTlGZWZ+aWwP72q",
+ "cs0u2ML8lNufXooFSy/YYgCZNaxRhQu7FfYfM16cHetNVK94KcR1VYYLSluK62xLzl8MbbId81DCPKu1",
+ "3VDxuNx4ZeTQHnpTb+QAkIO4K6lpeA1bCQZams7xn80c6YnO5W/mn7LMTW9dzmOoNXTsrmQ0HzizwllZ",
+ "5iylBolv3Wfz1TABsIoEbVoc44V6+iEAsZSiBKmZHZSWZZKLlOaJ0lTjSP8qYT45nfzLcWN/Obbd1XEw",
+ "+UvT6wI7GZHVikEJLcsDxnhjRB+1g1kYBo2fkE1YtodCE+N2Ew0pMcOCc1hRro8alaXFD+oD/M7N1ODb",
+ "SjsW3x0VbBDhxDacgbISsG14T5EA9QTRShCtKJAucjGrf/jqrCwbDOL3s7K0+EDpERgKZrBhSqv7uHza",
+ "nKRwnvMXR+SHcGwUxQXPt+ZysKKGuRvm7tZyt1htW3JraEa8pwhup5BHZms8GoyYfxcUh2rFUuRG6tlL",
+ "K6bxf7q2IZmZ30d1/mOQWIjbYeJCRcthzuo4+Eug3HzVoZw+4ThzzxE56/a9GdmYUeIEcyNa2bmfdtwd",
+ "eKxRuJa0tAC6L/YuZRyVNNvIwnpLbjqS0UVhDs5wQGsI1Y3P2t7zEIUESaEDw7NcpNf/SdXyDs78zI/V",
+ "P344DVkCzUCSJVXLo0lMygiPVzPamCNmGqKCT2bBVEf1Eu9qeXuWllFNg6U5eONiiUU99kOmBzKiu/yE",
+ "/6E5MZ/N2Tas3w57RC6RgSl7nN0jQ2a0fasg2JlMA7RCCFJYBZ8YrfsgKJ83k8f3adQefWdtCm6H3CJw",
+ "h8Tmzo/BM7GJwfBMbHpHQGxA3QV9mHFQjNRQqBHwvXCQCdx/hz4qJd32kYxjj0GyWaARXRWeBh7e+GaW",
+ "xjh7NhPyZtynw1Y4aUzOhJpRA+Y77SAJm1Zl4kgxYrayDToDNa98u5lGd/gYxlpYuND0d8CCMqPeBRba",
+ "A901FkRRshzugPSXUaY/owoePyIX/3n29cNHvz76+htDkqUUC0kLMttqUOQrp5sRpbc53O+vDLWjKtfx",
+ "0b954g2V7XFj4yhRyRQKWvaHsgZQKwLZZsS062OtjWZcdQ3gmMN5CYaTW7QTa9s3oL1gykhYxexONmMI",
+ "YVkzS0YcJBnsJaZDl9dMsw2XKLeyugtVFqQUMmJfwyOmRSryZAVSMRF5TXnjWhDXwou3Zfd3Cy1ZU0XM",
+ "3Gj6rTgKFBHK0hs+nu/boS83vMHNTs5v1xtZnZt3zL60ke8tiYqUIBO94SSDWbVoaUJzKQpCSYYd8Y7+",
+ "ATSKApesgAtNi/Kn+fxuVEWBA0VUNlaAMjMR28LI9QpSwa0nxB7tzI06Bj1dxHgTnR4GwGHkYstTtDPe",
+ "xbEdVlwLxvHRQ215GmixBsYcskWLLG+vrQ6hw051T0XAMeh4iZ/R0PECck2/F/KysQT+IEVV3rmQ151z",
+ "7HKoW4wzpWSmr9ehGV/kbe+bhYH9KLbGz7Kg5/74ujUg9EiRL9liqQO14o0UYn73MMZmiQGKH6xSlps+",
+ "fdXstcgMM9GVugMRrBms4XCGbkO+Rmei0oQSLjLAza9UXDgb8NfAh2J839ahvKeXVs+agaGulFZmtVVJ",
+ "8PW2d180HROa2hOaIGrUwNtV/ehoW9nprC9ALoFmWzID4ETM3AORe7rCRVJ8etZevHGiYYRftOAqpUhB",
+ "KcgSZ5jaC5pvZ68OvQNPCDgCXM9ClCBzKm8N7PVqL5zXsE3QUUKRr378Rd3/DPBqoWm+B7HYJobeWs13",
+ "r4B9qMdNv4vgupOHZEclEH+vEC1Qms1BwxAKD8LJ4P51Iert4u3RsgKJ73G/K8X7SW5HQDWovzO93xba",
+ "qhxw/3PqrZHwzIZxyoUXrGKD5VTpZB9bNo1aOrhZQcAJY5wYBx4QvF5Spe0bMuMZmr7sdYLzWCHMTDEM",
+ "8KAaYkb+xWsg/bFTcw9yValaHVFVWQqpIYutgcNmx1yvYVPPJebB2LXOowWpFOwbeQhLwfgOWXYlFkFU",
+ "108tzsmivzh8kDD3/DaKyhYQDSJ2AXLhWwXYDV2gBgBhqkG0JRymOpRT+11NJ0qLsjTcQicVr/sNoenC",
+ "tj7TPzdt+8RFdXNvZwIUel659g7ytcWsdX5bUkUcHKSg10b2QDOIfezuw2wOY6IYTyHZRfmo4plW4RHY",
+ "e0irciFpBkkGOd32B/3Zfib2864BcMcbdVdoSKwXU3zTG0r2TiM7hhY4nooJjwS/kNQcQaMKNATieu8Z",
+ "OQMcO8acHB3dq4fCuaJb5MfDZdutjoyIt+FKaLPjjh4QZMfRxwA8gId66JujAjsnje7ZneKvoNwEtRxx",
+ "+CRbUENLaMY/aAEDNlTnIB6clw5773DgKNscZGN7+MjQkR0w6L6hUrOUlajr/AjbO1f9uhNEnxlJBpqy",
+ "HDISfLBqYBn2J9b/pjvmzVTBUba3Pvg941tkOTlTKPK0gb+GLercb6xjZ2DquAtdNjKquZ8oJwiodxcz",
+ "InjYBDY01fnWCGp6CVuyBglEVbOCaW0dttuqrhZlEg4QfdfYMaN7xLNOkX4HxrwqXuBQwfL6WzGdWJ1g",
+ "N3yXHcWghQ6nC5RC5CMsZD1kRCEY5e9BSmF2nTnfce897CmpBaRj2viCW1//91QLzbgC8ldRkZRyVLkq",
+ "DbVMIyQKCihAmhmMCFbP6Tw7GgxBDgVYTRK/PHjQXfiDB27PmSJzWPuAC9Owi44HD9CO80Yo3Tpcd2AP",
+ "NcftPHJ94IOPuficFtLlKfs9C9zIY3byTWfw+pXInCmlHOGa5d+aAXRO5mbM2kMaGedVgeOOessJho6t",
+ "G/f9ghVVTvVdvFrBiuaJWIGULIO9nNxNzAT/bkXzn+puGEwCqaHRFJIUQyBGjgWXpo+NmtinGzbeZKwo",
+ "IGNUQ74lpYQUrJe/EflUDeMRsf5/6ZLyBUr6UlQL54Bmx0FOXSlrU5EV7w0RlYb0hidonY5xbud07AM9",
+ "jBwE1OhiXdO21TzWtJ7PxfaMuVID5HVN/dHXrelkUFU1SF01qqpFTjtaZQQXbwlqAX6aiUe+gSDqjNDS",
+ "x1e4LeYUmM39fWztzdAxKPsTBy5xzcchrzijJ+fbO5BW7EBEQilB4d0S2peU/SrmYWSau3zUVmko+iZ4",
+ "2/XXgeP3dlDREzxnHJJCcNhGg7EZh1f4MXqc8H4b6IySxlDfrvLQgr8DVnueMdR4W/zibndPaPepSX0v",
+ "5F29ZdoBR8vlI54O976Tuylv+sBJ8zzyJujiVroMQE3rOHkmCVVKpAyFrfNMTe1Bc8+ILsiljf43tTfu",
+ "HZy97ridx68wJBKNu5CXhJI0Z2j6FVxpWaX6ilM0LgVLjXgteS162Nz43DeJ2zcj5kc31BWn6LFWm5yi",
+ "nhZziNhXvgfwVkdVLRagdEdJmQNccdeKcVJxpnGuwhyXxJ6XEiS6Dh3ZlgXdkrmhCS3IbyAFmVW6LbZj",
+ "WJbSLM/dS5yZhoj5Faea5ECVJq8Yv9zgcP613h9ZDnot5HWNhfjtvgAOiqkk7l31g/2Kjq9u+UvnBIth",
+ "9Pazfbsx4zexW1u0PTWh4f/nq/84fXeW/DdNfjtJnv7b8fsPTz7ef9D78dHHb7/9v+2fHn/89v5//Gts",
+ "pzzssaAhB/n5C6fSnr9AvaV5vOnB/skM9wXjSZTIQjeMDm2RrzBA1hHQ/bZVSy/hiusNN4S0ojnLDG+5",
+ "CTl0b5jeWbSno0M1rY3oWLH8Wg/UBm7BZUiEyXRY442lqL5DYjw8D18TXcQdnpd5xe1WeunbRp94xzAx",
+ "n9YhmDY7yynB+Lwl9V6N7s9HX38zmTZxdfX3yXTivr6PUDLLNrHoyQw2MSXPHRA8GPcUKelWgY5zD4Q9",
+ "6gNnnTLCYQsoZiDVkpWfnlMozWZxDud9+p2xaMPPuXW2N+cH3ya37slDzD893FoCZFDqZSxrQ0tQw1bN",
+ "bgJ0/EVKKVbAp4QdwVHXWJMZfdF54+VA55g9ALVPMUYbqs+BJTRPFQHWw4WMsojE6AdFHsetP04n7vJX",
+ "d64OuYFjcHXnrB8i/d9akHs/fHdJjh3DVPdsIK8dOgi9jKjSLrqo5UlkuJnNVWOFvCt+xV/AnHFmvp9e",
+ "8YxqejyjiqXquFIgn9Gc8hSOFoKc+oClF1TTK96TtAbTSQWhYqSsZjlLyXWokDTkaVOE9Ee4unpH84W4",
+ "unrfc6roqw9uqih/sRMkRhAWlU5cgoNEwprK2KOVqgPccWSbwWTXrFbIFpW1bPoECm78OM+jZam6ga79",
+ "5ZdlbpYfkKFyYZxmy4jSQnpZxAgoFhrc39fCXQySrr1dpVKgyN8KWr5jXL8nyVV1cvIYSCvy82/uyjc0",
+ "uS1htHVlMBC3a1TBhVu1EjZa0qSki9jb2NXVOw20xN1HeblAG0eeE+zWijj1HvU4VLMAj4/hDbBwHBw9",
+ "h4u7sL18Mqv4EvATbiG2MeJG82J/0/0KYlBvvF2dONbeLlV6mZizHV2VMiTud6bOcbMwQpZ3o1Bsgdqq",
+ "Swc0A5IuIb12eVqgKPV22uruPXWcoOlZB1M2g4+NIMMcEviyMANSlRl1ojjl224wvwKtvT/wW7iG7aVo",
+ "UlAcEr3fDiZXQwcVKTWQLg2xhsfWjdHdfOcOhop9WfqYbAzO82RxWtOF7zN8kK3IeweHOEYUrWDnIURQ",
+ "GUGEJf4BFNxgoWa8W5F+bHlGy5jZmy+SzcfzfuKaNMqT89wKV4NWd/u9AEwHJtaKzKiR24XLZGUDpgMu",
+ "Vim6gAEJOXzcGRmW3HoQwkH23XvRm07Muxda776JgmwbJ2bNUUoB88WQCiozHX89P5N9P3QvE5ig0iFs",
+ "lqOYVDs2WqZDZeuRzWbcGwItTsAgeSNweDDaGAklmyVVPskW5iLzZ3mUDPA7JgDYlfblPHA1CxKO1Uld",
+ "PM/tntOedumSv/iMLz7NS6hajkjZYiR89G6PbYfgKABlkMPCLtw29oTSJCNoNsjA8dN8njMOJIl5rQVm",
+ "0OCacXOAkY8fEGIt8GT0CDEyDsDGd3EcmLwW4dnki0OA5C6ZAvVj44t68DfE476sH7cReURpWDgbeNVK",
+ "PQegztWxvr86Drc4DGF8SgybW9HcsDmn8TWD9LKPoNjayTXiPDPuD4mzOx5A7MVy0JrsVXST1YQykwc6",
+ "LtDtgHgmNokN/IxKvLPNzNB71LUdw1BjB9PmebmnyExs0NsHrxbrSr0HlmE4PBiBhr9hCukV+w3d5haY",
+ "XdPulqZiVKiQZJw5ryaXIXFizNQDEswQuXwVpG65EQAdY0eTB9kpv3uV1LZ40r/Mm1tt2qQk81FDseM/",
+ "dISiuzSAv74Vpk628qYrsUTtFG2nlXaemUCEjBG9YRP9R5r+U5CCHFApSFpCVHIdezk1ug3gjXPhuwXG",
+ "C8xmQ/n2fuAJJWHBlIbGiO79JD6HeZJiEj0h5sOr06Wcm/W9FaK+puwzInZsLfOTrwBdiedMKp3gC0R0",
+ "CabR9wqV6u9N07is1Pa1silnWRbnDTjtNWyTjOVVnF7dvD++MNO+rlmiqmbIbxm3DiszTJEc9cDcMbV1",
+ "0t254Jd2wS/pna133GkwTc3E0pBLe44/yLnocN5d7CBCgDHi6O/aIEp3MMggcrbPHQO5KXjjP9plfe0d",
+ "psyPvddrx8fvDt1RdqToWgKDwc5VMHwmMmIJ00GG4X5I68AZoGXJsk3HFmpHHdSY6UEGD5+XrYMF3F03",
+ "2B4MBHbPWFSNBNVOwdcI+DZXdCsDztEozFy2E+WFDCGciilf6aCPqDrqbh+uLoHmP8L2F9MWlzP5OJ3c",
+ "znQaw7UbcQ+u39TbG8UzPs1bU1rrJeRAlNOylGJF88QZmIdIU4qVI01s7u3Rn5jVxc2Yl9+dvXzjwP84",
+ "naQ5UJnUosLgqrBd+YdZlc32N3BAfCZ1o/N5md2KksHm1ynKQqP0egkuJXUgjfZyZzYPDsFRdEbqedxD",
+ "aK/J2b2N2CXueCOBsn4iacx39oWk/SpCV5Tl3m7moR3w5sHFjUvAGuUK4QC3fl0JHsmSO2U3vdMdPx0N",
+ "de3hSeFcO5JmFzYvvCKCd5/Q0ed5W7pX94Ji5ktrFekzJ14VaElIVM7SuI2Vz5QhDm7fzkxjgo0HhFEz",
+ "YsUGnmJ5xYKxTLMxuW06QAZzRJGpoul1GtzNhKv5U3H2jwoIy4Br80niqewcVEyT4qzt/evUyA79udzA",
+ "1kLfDH8bGSPM+tq98RCI3QJG+FLXA/dFrTL7hdYWKfND8CRxwIN/OGPvStzxWO/ow1GzdV5ctl/cwhI9",
+ "ff5nCMPmat9fH8grry797MAc0Xo/TCVzKX6DuJ6H6nEkYMnnuWXo5fIbhIEOYZWLFouprTtN2aJm9sHt",
+ "HpJuQitU20lhgOpx54NnOUy46S3UlNuttoEkLV+3OMGEXqXHdvyGYBzMPU/cnK5nNJaN1AgZBqaz5gG4",
+ "ZUvXgvjOHveqjraws5PgLbluy2wwegmyiSXsJ7a5ocBgpx0tKjSSAVJtKBNM7ftfrkRkmIqvKbdVXEw/",
+ "e5RcbwXW+GV6rYXEVBIqbvbPIGUFzeOSQ5b2TbwZWzBboKRSEFTAcAPZ4k+WilwVkTqGyKHmfE5OpkEZ",
+ "HrcbGVsxxWY5YIuHtsWMKuTktSGq7mKWB1wvFTZ/NKL5suKZhEwvlUWsEqQW6lC9qR+vZqDXAJycYLuH",
+ "T8lX+Gyn2AruGyy6+3ly+vApGl3tHyexC8AVmNnFTTJkJ3927CROx/huaccwjNuNehSNurcV5oYZ147T",
+ "ZLuOOUvY0vG6/WepoJwuIO4pUuyByfbF3URDWgcvPLPlkZSWYkuYjs8Pmhr+NOB9btifBYOkoiiYLtzj",
+ "jhKFoaemvIWd1A9nay25zMQeLv8R30hL/0TUUSI/rdHU3m+xVeNL9mtaQButU0Jt/pCcNd4LPl86Offp",
+ "iTBVc52h2eLGzGWWjmIOOjPMSSkZ16hYVHqe/ImkSyppatjf0RC4yeybJ5H01O00qfwwwD853iUokKs4",
+ "6uUA2XsZwvUlX3HBk8JwlOx+E+0RnMrBx9z4s93Q2+HuoccKZWaUZJDcqha50YBT34rw+I4Bb0mK9XoO",
+ "oseDV/bJKbOScfKgldmhn9++dFJGIWQs52Bz3J3EIUFLBiv03Ytvkhnzlnsh81G7cBvoP+/Lgxc5A7HM",
+ "n+WoIrAqfvFm2UGffSPC//LKlVPsyd4DfgbWkaDu84ljEaIuSVZCQzc+gqsmf3v4NyJh7gokPniAQD94",
+ "MHXC3N8etT9bJvXgQTwTT9SmYX5tsHAQK+xmKjB9Y3v4TEQsDD7tff0a4uINIhaeIVZrPpijPHNDTUk7",
+ "xfinvwvvxpMt/loZPwVXV+/wi8cD/tFFxGc+8riBjT+GXckAoQQlFqIkk9XfAz8JSp6JzVjC6XBSTzz/",
+ "BCiKoqRiefZLE73bYW2S8nQZffecmY6/NrX26sXZwxtNAbmknEMeHc7qDL963SKi/fxdjJ2nYHxk225R",
+ "DbvczuIawNtgeqD8hAa9TOdmghCr7cDI2vE+X4iM4DxNvsHmuPaLsQQp8/9RgdKxCws/WOc/tG8bdmAz",
+ "thPgGVoVjsgPtpz2EkgrmRRq8z7bRzvyvSpzQbMpZiG5/O7sJbGz2j62YpTNGL9AZba9io5dM0ilOs6N",
+ "3Bd/ioe4jB9nt8+9WbXSSZ3gPRZEbFo0KehZ560H1dwQO0fkRVAY18YbmyEIJqGRhdHM69GsjIs0Yf6j",
+ "NU2XqLq3WOswyY8vdeCpUgXlResyYXV+UTx3Bm5X7cAWO5gSoZcg10zZKsqwgnbcch3E70xHPo65vTxZ",
+ "cW4p5eiAW67OJnoo2j1w9or0z0FRyDqIP1Bxs5VCDq38cIG9ounOumUkenVFbRRsXf7JV8dPKRecpZhs",
+ "LHZFu3LLY95KR+Rl6xrj/RF3JzRyuKLFK2p3SofFwXIWnhE6xPUfa4KvZlMtddg/Ndb1XVJNFqCV42yQ",
+ "TX0NFmcvZlyByxeLxbkDPilk6/0ZOWTUpSGpn74OJCMMnxowAHxvvr125iGMK7hmHBVBhzYn+FmLLlaD",
+ "1UZ7ZJosBCi3nnYMuXpn+hxhOHUGm/dHvnosjmGfb82yra9Cf6gz77ngPAVM2+emrUtyVf/c8lS3k56V",
+ "pZt0uEJPVB7QGz6I4MgLdOKfAAPk1uOHo+0gt50uR3ifGkKDFTosQIn3cI8w6mo1nUpoRmi1FIUtiHX1",
+ "i2a6YDwCxkvGoaltHLkg0uiVgBuD53Wgn0ol1VYEHMXTLoHmVqGOMDSl3RPVbYfqpvgyKME1+jmGt7Ep",
+ "tDPAOOoGjeBG+bYuqWyoOxAmnmMtd4fIftkclKqcEJVh5EmnkE6McRjG7Ut1tS+AAT2/JRPZ7pjv7tCb",
+ "aCiYeFZlC9AJzbJY+t5n+JXgV5JVKDnABtKqTvNaliTF3DntZEJ9anMTpYKrqtgxl29wy+mCylQRagir",
+ "Y/kdxmCl2Rb/jeU4Hd4Z56xzsLuo98zJDsug1Xd/jUm9hqYTxRbJeEzgnXJ7dDRT34zQm/53Sum5WLQB",
+ "+RxmuwEuF+5RjL99Zy6OMMNGL3GvvVrqBBjonCl8PVFUG+vQ7TZXwqusl8kXHwXreoW7DRDDlQenePkN",
+ "uGiHRlh7v1rD5JCjdjoYV0C1i3DUlOxkQYNRY9bLq2PW7VvYhzy7rGPX3ZlD3Vp3ItS7DPYB+tH7I5OS",
+ "MudC0TCLPmZd5EI/lmSMT3Ozwd1FuHiAQYvdj6sh332fUA+/dyuTXYNLe1BKWDFReecE773mVUL7a6vO",
+ "Vx09EV1/3/CKU31ec+ig8fbSVYiwy3Q6+Y+/WF9HAlzL7T+BKbe36b2aZ31p15qnmiakTi4+Ktl461Yc",
+ "k2wyltfQyYatqmt7asb1yOrFGHGgXwNuOjnPDrowY7kxJ3aU2LGLV3QbTh3WpAvDI1YKxZoc/7FSbyPd",
+ "RC+xWluQ+qw/lvfRWkGqsbBD43siAQ5JhGYmC4rHfkkhNqBO1960LnPYrnRh/WoOe+74XkRfEJVqM+Ef",
+ "jU+OdVZ7GCKfxozWC+Cufms7Vmd0xMB8Dqlmqz0RlH9eAg+i86beLmPrsAcBlaz2QMcEPIdbHRuAdgU4",
+ "7oQnSIR5a3CG4qeuYXtPkRY1RFPzT/1Ve5PcK4gB5A6JIRGhYh481pDsnCqYqikDseA95mx3aLLYDVb1",
+ "CuKBbziXJ0lzcTQxwjumjJcVGjWX6XpQ5Dw6Uw8FWfarkgzrHy+wCIyqK2763C2hlk7O+xku1y73C8a7",
+ "1m8nPgsMKP+bD263s+TsGsK6Y/hStaYy8y2iphdv1Ul23Ee9yEhfUaML9LyemTX+zf1YuEjONPRiT3Nh",
+ "xIhkKBSg7VJc++PcU9ZxyqbwR2dpA9ccpKvPiPJvLhQkWnh/6F1w7EKF9Q67ERLUYJ5SC9xg9qC3TXok",
+ "zNdMMVsQdU5h4QKJhIIa6GSQxGh4zl3Ifm6/++Avn693r4Wpptf9hSO8ZztTPSSGVD8n7rbcH1R2E2MT",
+ "49zWAFexjEYcZPs1pJQiq1J7QYcHozbIjc4XtoOVRO00aX+VHR0hiMy9hu2xVYJ8xQ2/gyHQVnKyoAeZ",
+ "MDqbfKfmNxWDe3En4H1Oy9V0UgqRJwOPHef9NExdir9m6TVkxNwU3gN0oAoS+Qpt7PVr9nq59WmHyhI4",
+ "ZPePCDnj1ufeP2y384B3Juf39K75NzhrVtnMaM6odnTF487LmLNM3pKb+WF28zAFhtXdcio7yJ4kP5uB",
+ "FFCSriM1wY7GauX9p+ZunaaGqCwUMZnkwr5YPceDHjMcrSXT4Bwb7CVuNpK4ly6ichFzEoT1uPj92qHU",
+ "7EguBi7ucDIESAMfE+dZQ+EGjyKgrsG0x1Go9hFqytc0fkJ98SjPxTrBY5TUSexiSpdp174lfNreppsh",
+ "txkEDkdUOQliS5Y0I6mQEtKwRzxOxwJVCAlJLtD/KPY0OtdGICzQOZ+TXCyIKI2eb3NB+kekaG2lYK67",
+ "qiNlY84tBIl98RrI6gHKxZg7cG3jPrw7SjkdXibqchkxXOGG+d06uBaUI7iDS7gEYI4g9P1Gu7NYqav2",
+ "urpF14ZKIGpRsDSO7j+Wu86gk02MemOocFmUbRQnNsMDHvKU+nUWT08fzcDpLI/yanf83CsV0rn5L17h",
+ "3XHJHBxzGeBnkZrNlg0n6eBl0QEAIbWhRbqSNvVyyMrrgm5iYUMR8Y2tC+hIhoOuDLeDzYxwl0B93E0o",
+ "sYpvkYNQ744rSOdjqQcOVdRJYrdPgq0COhvrmVBnmh/JPwMAhn0VWjCM8lg4FIw5VtVNaATJ57WeOG0V",
+ "PWedS8JnAbXMMKXWTrQEYsauJLjYXlv+s1NvrKR66eVG07xvzeEZbEBh4K0tmkSVtT16G6irPdoVyEWZ",
+ "5LCClguHCziu0hSUYisI65baziQDKPFFoKunxnwTwuuwo7y4tSfB6/YY7Ea1GYtYu1Nkj6oSVaw2PLHH",
+ "RI09SgaiFcsq2sKfukUFx6HijZH72sP6fhynOJhJxBe3i0Xs9SZCmo+eSx53Jgrj3WszJM6W1c8Vlgib",
+ "k61KuubDanufKBtxc3zt0wCx320gxau77S1ze5wQHIyoTi6LQTlT1jt8U/PPIJXtIrJeJdi4Hga+kneY",
+ "dsrrCq5v5Gq0hmqmIgMw1fAG9L2FxrczaFbQLcnYfA7SPsUpTXlGZRY2Z5ykIDVlnKzpVt1cJzPQygqm",
+ "e9Uyw6lxUM+sYgoaWpUtIPnWKfxDKtMIVQffXSNqjr22tRgqUtvblXgwEN0Y1RC9IgeIwKWiQMXQHlbB",
+ "USonBb2GA+dR7DfYPQ0miHKWey1w1jFTfNxJ6z8h6vDA/8yZ3kntVt7ruqnad0RLjJ4G+aJxZrCb06fB",
+ "mGfxpS2VFnoXdyuP+L22Rk07HwxkUm2L6QO7iGYd55YeyuRqvLrashzF/JctD0+Qt6sd7gqgglptqTM3",
+ "98WS3qVgkTJ13t8HSi1WXaBZxoZK4y/BpSt3Z6s9bW0CNOOMt3QH9q44RKUok3TMG1YGORhWY7UWB2kb",
+ "xhE2sjLdcy1EL8kBrtRWkcQc+QMeCysaoLdPfSFOu35obSGgPnhYdzmtJIqxa7rdnxKzEQTiLvx2ZK+D",
+ "e8+kGmq3wfaIK1vKJ5px8hABMcJ1YtVs+rn+7n4xNjaleT3//Zbj3sfiCzjjTlHCGoW76K1RpTypRGiN",
+ "8m2MafgXoBsscEg+HOFdfWdbVZ+W32ODopfkzVJAjwKt72kbwWZQs32381OYIb5JWyCtwzY6S3iNtMsv",
+ "XjWa6rjq8b7DHvBCn7igfrx/nnTgfOb4/1c1UoKlvB+ihNby97nZuQU2qn2wRU5a1hpsvQ4bM9rel8CH",
+ "Uj2vXRMHruaeByOmgzfiWZ5HPB+tAG+LiweEY+5FuaL5p/dexDoBZ4gPyN4O+zuE7m8hki0q1c2Cb1/S",
+ "UXMHrm53NzV/g96WfwazR9FrwQ3lbAY95o/qF83t09TcVxpeASdrHNNabB9+Q2YuwVQpIWWqa4tY+yKA",
+ "tbcX1sR1Ac8bvce9bN86fxH6FmQ896Y98ropKIavLwveQNgc0c/MVAZObpTKY9TXI4sI/mI8Ksz0vOe6",
+ "uG7FcDRSXXCjCQl3HMsRRGUeGMvRz2E9dnk2XsFcOpWC/jpH39Yt3EYu6mZtYwORRmeDwmpPY+KH4pmb",
+ "THcMYLqTFE4HJXD6HUKXLI7cGG7eGMX8MpTMwiZsGMib0tmPiuXZPsJoZcH5WNfIxzwvv7p8aZ/2LvUQ",
+ "WHfq/lF1JatvEQNiERNZa2vyYKogv82I1DauWySRDboqpZVkeotp3L3Gy36NBln9UDvsu4CP2ojq7j4t",
+ "rqEuBNC491fK364/CJrjfWRtu9zcQiI/It9taFHmziZCvr03+3d4/Kcn2cnjh/8++9PJ1ycpPPn66ckJ",
+ "ffqEPnz6+CE8+tPXT07g4fybp7NH2aMnj2ZPHj355uun6eMnD2dPvnn67/cMHzIgW0AnPmno5C/JWb4Q",
+ "ydmb8+TSANvghJbsR9ja8uWGjH1hdJriSYSCsnxy6n/6//0JO0pF0Qzvf524nISTpdalOj0+Xq/XR2GX",
+ "4wX68yZaVOny2M/Tq5x+9ua8fje3zy64o7XHlPXFcaRwht/efndxSc7enB81BDM5nZwcnRw9NOOLEjgt",
+ "2eR08hh/wtOzxH0/dsQ2Of3wcTo5XgLNMfzF/FGAliz1nyTQbOv+r9Z0sQB55KrFm59Wj469WHH8wfk1",
+ "f9z17TgsvHj8oeX+ne3piYXZjj/4fOO7W7cSeju3d7P0qDX8B9Au0knpsCpsywQw23rP7SlRQjp30FIy",
+ "YU7V1FyRGaQSKJ4BITF3j5YVT60t304BHP/76uwv+J7x6uwv5FtyMnX+AwrVjtj01tmxJofzzILdf8JR",
+ "z7ZndSBBUI3o9F3MchKrYo/HydBKQO31iA03w8eNoEpOw5sNvz1Jnr7/8PWfPsZkvp4EWyMp8K0PUa+F",
+ "z8mNSCvo5tshlG3cw7MZ9x8VyG2ziIJuJiHAfZtmJODQu9b4rPnWNdQFczs3HKbIf1389JoISZyO+4am",
+ "17VbkQEZU01LsWKY+CYLsiWZnkMQu+svBNqXn3X+SYValO3cGzWa32MeXwQUD/2jkxPP6ZweEZy+Y3eo",
+ "g5k6xqc+oWHypsD61vdCVQQ2NNX5llAVPGOpatbk3O44f4kyab2f77T39Wf0FSxjZvxDHWEjyaGwUuRu",
+ "+C47+Ylb6HCuJlgxd79VvYeMKATvY5d9uLWeRr7s7v+M3e3LDqQU5kwz9GZsrhx/nbWAbOoYOnAHfPyP",
+ "yF9FhRKerVQOscIhOAM6Vvg5XUhSEPnaeAzhlwcPugt/8MDtOVNkDmtkspRjwy46Hjw4Mjv15EBWttOa",
+ "3MrgMersHDJcb7Ne0U1dr4ESLnjCsZD2CkigFj45efiHXeE5x3hYI5oSK3p/nE6+/gNv2Tk3gg3NCba0",
+ "q3n8h13NBcgVS4FcQlEKSSXLt+RnXid4DIp/9Nnfz/yaizX3iDBaZVUUVG6dEE1rnlPxIOXmTv7TCy5q",
+ "BG3konSh8IkZRVQr0zYF5ifvP3odYKRisavZ8QyzWo9tCipoPKyd4PuBOv6AFvDB349dGt74R3yJsCru",
+ "sQ97jrdsKT4f9MbA2umRUp0uq/L4A/4HVc4ALJv06lhv+DH6/Bx/aK3Gfe6tpv170z1ssSpEBh5gMZ/b",
+ "kmi7Ph9/sP8GE8GmBMnMnYKB5u5XmxDkGAsVbPs/b3ka/bG/jrJT3Tv28/GHdj3cFoLUstKZWAd98QXA",
+ "Pl/153OVzDt/H68p00Z+cZH1WK+o31kDzY9dGs3Or03mqt4XTMcV/NiReEphA5jayuZbur5s+X5KGwHy",
+ "TKCBYIgXbpIZ48ggQgbW2PXsx7720mNbl0uwXlz+aTQiHmpBZlLQLKUKy+C4hLM9tfXjLVWjbsDKeeTh",
+ "C8FES0A/SNsc9aO9ryE47hj5L9iXoHocyuHK2gN/Z5mpB9EzmhEf8ZaQVzQ3Gw4ZOXOSeQsbv7e88/kF",
+ "lM8sUXwyEeCZP3yKUAxMbeluMh4JFmSGHnPfGwXPMIAF8MSxoGQmsq0vhijpWm9s1EyXuR3XVS2jH+/A",
+ "SPjPbRncZxD8Yof7Yof7Yqn5Yof7srtf7HAj7XBfrFRfrFT/K61Uh5imYmKmM80MS5tYMYi25rW6HW0y",
+ "s9Usvh2zy3Qtk/WLCDJ9RMgl5r2i5paAFUiaY6FlFSSyK9AFEiN/ITu94kkLEutoaCb+qvmv9fC8qk5O",
+ "HgM5ud/tozTL85A39/uivIufbNbsb8nV5GrSG0lCIVaQ2bCpMDOQ7bV32P+vHvenXkoxjBFc0hXUAcJE",
+ "VfM5S5lFeS74gtCFaLyTDd8mXOAXkAY4m5iVMD11yY+ZImuzeFe3qZ3AqC259yWA82YL977od8gl/phv",
+ "CO/Al/x/G/OM/79aSr9FnO2tGOnOsXtc9QtX+RRc5bPzlT/6G2lgPvwfKWY+OXnyh11QaGx+LTT5Hj3v",
+ "byeO1bXwYvlpbypo+TB6b+5rvHdDb1i8RWs/2HfvzUWABczdBds4d54eH2MGzKVQ+nhirr+242f48X0N",
+ "s69YOiklW2EBlPcf/18AAAD//6Cbm1hv8QAA",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index 05aa4ed91..fb904f890 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -247,8 +247,8 @@ func (v2 *Handlers) AddParticipationKey(ctx echo.Context) error {
partKeyBinary := buf.Bytes()
if len(partKeyBinary) == 0 {
- err := fmt.Errorf(errRESTPayloadZeroLength)
- return badRequest(ctx, err, err.Error(), v2.Log)
+ lenErr := fmt.Errorf(errRESTPayloadZeroLength)
+ return badRequest(ctx, lenErr, lenErr.Error(), v2.Log)
}
partID, err := v2.Node.InstallParticipationKey(partKeyBinary)
@@ -372,9 +372,9 @@ func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params
// count total # of resources, if max limit is set
if maxResults := v2.Node.Config().MaxAPIResourcesPerAccount; maxResults != 0 {
- record, _, _, err := myLedger.LookupAccount(myLedger.Latest(), addr)
- if err != nil {
- return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ record, _, _, lookupErr := myLedger.LookupAccount(myLedger.Latest(), addr)
+ if lookupErr != nil {
+ return internalError(ctx, lookupErr, errFailedLookingUpLedger, v2.Log)
}
totalResults := record.TotalAssets + record.TotalAssetParams + record.TotalAppLocalStates + record.TotalAppParams
if totalResults > maxResults {
@@ -430,9 +430,9 @@ func (v2 *Handlers) basicAccountInformation(ctx echo.Context, addr basics.Addres
}
if handle == protocol.CodecHandle {
- data, err := encode(handle, record)
- if err != nil {
- return internalError(ctx, err, errFailedToEncodeResponse, v2.Log)
+ data, encErr := encode(handle, record)
+ if encErr != nil {
+ return internalError(ctx, encErr, errFailedToEncodeResponse, v2.Log)
}
return ctx.Blob(http.StatusOK, contentType, data)
}
@@ -482,9 +482,9 @@ func (v2 *Handlers) basicAccountInformation(ctx echo.Context, addr basics.Addres
NumByteSlice: record.TotalAppSchema.NumByteSlice,
NumUint: record.TotalAppSchema.NumUint,
},
- AppsTotalExtraPages: numOrNil(uint64(record.TotalExtraAppPages)),
- TotalBoxes: numOrNil(record.TotalBoxes),
- TotalBoxBytes: numOrNil(record.TotalBoxBytes),
+ AppsTotalExtraPages: omitEmpty(uint64(record.TotalExtraAppPages)),
+ TotalBoxes: omitEmpty(record.TotalBoxes),
+ TotalBoxBytes: omitEmpty(record.TotalBoxBytes),
MinBalance: record.MinBalance(&consensus).Raw,
}
response := model.AccountResponse(account)
@@ -611,13 +611,13 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params model.GetBlo
// msgpack format uses 'RawBlockBytes' and attaches a custom header.
if handle == protocol.CodecHandle {
- blockbytes, err := rpcs.RawBlockBytes(v2.Node.LedgerForAPI(), basics.Round(round))
- if err != nil {
- switch err.(type) {
+ blockbytes, blockErr := rpcs.RawBlockBytes(v2.Node.LedgerForAPI(), basics.Round(round))
+ if blockErr != nil {
+ switch blockErr.(type) {
case ledgercore.ErrNoEntry:
- return notFound(ctx, err, errFailedLookingUpLedger, v2.Log)
+ return notFound(ctx, blockErr, errFailedLookingUpLedger, v2.Log)
default:
- return internalError(ctx, err, err.Error(), v2.Log)
+ return internalError(ctx, blockErr, blockErr.Error(), v2.Log)
}
}
@@ -732,9 +732,9 @@ func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round uint64, txid str
return badRequest(ctx, err, "unsupported hash type", v2.Log)
}
- proof, err := tree.ProveSingleLeaf(uint64(idx))
- if err != nil {
- return internalError(ctx, err, "generating proof", v2.Log)
+ proof, proofErr := tree.ProveSingleLeaf(uint64(idx))
+ if proofErr != nil {
+ return internalError(ctx, proofErr, "generating proof", v2.Log)
}
response := model.TransactionProofResponse{
@@ -821,7 +821,7 @@ func (v2 *Handlers) GetStatus(ctx echo.Context) error {
response.UpgradeVotes = &votes
response.UpgradeYesVotes = &votesYes
response.UpgradeNoVotes = &votesNo
- response.UpgradeNextProtocolVoteBefore = numOrNil(uint64(stat.NextProtocolVoteBefore))
+ response.UpgradeNextProtocolVoteBefore = omitEmpty(uint64(stat.NextProtocolVoteBefore))
response.UpgradeVoteRounds = &upgradeVoteRounds
}
@@ -929,26 +929,28 @@ func (v2 *Handlers) RawTransaction(ctx echo.Context) error {
// PreEncodedSimulateTxnResult mirrors model.SimulateTransactionResult
type PreEncodedSimulateTxnResult struct {
- Txn PreEncodedTxInfo `codec:"txn-result"`
- AppBudgetConsumed *uint64 `codec:"app-budget-consumed,omitempty"`
- LogicSigBudgetConsumed *uint64 `codec:"logic-sig-budget-consumed,omitempty"`
+ Txn PreEncodedTxInfo `codec:"txn-result"`
+ AppBudgetConsumed *uint64 `codec:"app-budget-consumed,omitempty"`
+ LogicSigBudgetConsumed *uint64 `codec:"logic-sig-budget-consumed,omitempty"`
+ TransactionTrace *model.SimulationTransactionExecTrace `codec:"exec-trace,omitempty"`
}
// PreEncodedSimulateTxnGroupResult mirrors model.SimulateTransactionGroupResult
type PreEncodedSimulateTxnGroupResult struct {
- Txns []PreEncodedSimulateTxnResult `codec:"txn-results"`
- FailureMessage *string `codec:"failure-message,omitempty"`
- FailedAt *[]uint64 `codec:"failed-at,omitempty"`
AppBudgetAdded *uint64 `codec:"app-budget-added,omitempty"`
AppBudgetConsumed *uint64 `codec:"app-budget-consumed,omitempty"`
+ FailedAt *[]uint64 `codec:"failed-at,omitempty"`
+ FailureMessage *string `codec:"failure-message,omitempty"`
+ Txns []PreEncodedSimulateTxnResult `codec:"txn-results"`
}
// PreEncodedSimulateResponse mirrors model.SimulateResponse
type PreEncodedSimulateResponse struct {
- Version uint64 `codec:"version"`
- LastRound uint64 `codec:"last-round"`
- TxnGroups []PreEncodedSimulateTxnGroupResult `codec:"txn-groups"`
- EvalOverrides *model.SimulationEvalOverrides `codec:"eval-overrides,omitempty"`
+ Version uint64 `codec:"version"`
+ LastRound uint64 `codec:"last-round"`
+ TxnGroups []PreEncodedSimulateTxnGroupResult `codec:"txn-groups"`
+ EvalOverrides *model.SimulationEvalOverrides `codec:"eval-overrides,omitempty"`
+ ExecTraceConfig simulation.ExecTraceConfig `codec:"exec-trace-config,omitempty"`
}
// PreEncodedSimulateRequestTransactionGroup mirrors model.SimulateRequestTransactionGroup
@@ -962,6 +964,7 @@ type PreEncodedSimulateRequest struct {
AllowEmptySignatures bool `codec:"allow-empty-signatures,omitempty"`
AllowMoreLogging bool `codec:"allow-more-logging,omitempty"`
ExtraOpcodeBudget uint64 `codec:"extra-opcode-budget,omitempty"`
+ ExecTraceConfig simulation.ExecTraceConfig `codec:"exec-trace-config,omitempty"`
}
// SimulateTransaction simulates broadcasting a raw transaction to the network, returning relevant simulation results.
@@ -1351,6 +1354,8 @@ func (v2 *Handlers) startCatchup(ctx echo.Context, catchpoint string) error {
code = http.StatusOK
case *node.CatchpointUnableToStartError:
return badRequest(ctx, err, err.Error(), v2.Log)
+ case *node.StartCatchpointError:
+ return timeout(ctx, err, err.Error(), v2.Log)
default:
return internalError(ctx, err, fmt.Sprintf(errFailedToStartCatchup, err), v2.Log)
}
diff --git a/daemon/algod/api/server/v2/handlers_test.go b/daemon/algod/api/server/v2/handlers_test.go
index 42de7293f..a0a50dddb 100644
--- a/daemon/algod/api/server/v2/handlers_test.go
+++ b/daemon/algod/api/server/v2/handlers_test.go
@@ -117,6 +117,8 @@ func makeTagGraph(rootType reflect.Type, seen map[reflect.Type]*tagNode) *tagNod
case reflect.Ptr:
// Directly embed value type graph
node = makeTagGraph(rootType.Elem(), seen)
+ // the node in seen for rootType should be refreshed from calculation.
+ seen[rootType] = node
case reflect.Struct:
for i := 0; i < rootType.NumField(); i++ {
field := rootType.Field(i)
@@ -141,8 +143,8 @@ func makeTagGraph(rootType reflect.Type, seen map[reflect.Type]*tagNode) *tagNod
} else {
tagValue = field.Name
}
- if len(tagValue) != 0 {
- // ignore any empty tags
+ if len(tagValue) != 0 && tagValue != "-" {
+ // ignore any empty tags and skipping fields
node.addChild(tagValue, subgraph)
}
}
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index a4625d41c..a56577d54 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -34,6 +34,8 @@ import (
"github.com/algorand/go-algorand/daemon/algod/api/server"
"github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ "golang.org/x/exp/slices"
+
"github.com/labstack/echo/v4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
@@ -1029,8 +1031,7 @@ int 1`,
var expectedFailedAt *[]uint64
if len(scenario.FailedAt) != 0 {
- clone := make([]uint64, len(scenario.FailedAt))
- copy(clone, scenario.FailedAt)
+ clone := slices.Clone(scenario.FailedAt)
clone[0]++
expectedFailedAt = &clone
}
diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go
index 75ee461ca..aef805148 100644
--- a/daemon/algod/api/server/v2/test/helpers.go
+++ b/daemon/algod/api/server/v2/test/helpers.go
@@ -40,7 +40,6 @@ import (
"github.com/algorand/go-algorand/ledger/simulation"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/node"
- "github.com/algorand/go-algorand/node/indexer"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
@@ -168,7 +167,7 @@ func (m *mockNode) BroadcastSignedTxGroup(txgroup []transactions.SignedTxn) erro
}
func (m *mockNode) Simulate(request simulation.Request) (simulation.Result, error) {
- simulator := simulation.MakeSimulator(m.ledger.(*data.Ledger))
+ simulator := simulation.MakeSimulator(m.ledger.(*data.Ledger), m.config.EnableDeveloperAPI)
return simulator.Simulate(request)
}
@@ -221,10 +220,6 @@ func (m *mockNode) Uint64() uint64 {
return 1
}
-func (m *mockNode) Indexer() (*indexer.Indexer, error) {
- return nil, fmt.Errorf("indexer not implemented")
-}
-
func (m *mockNode) GetTransactionByID(txid transactions.Txid, rnd basics.Round) (node.TxnWithStatus, error) {
return node.TxnWithStatus{}, fmt.Errorf("get transaction by id not implemented")
}
diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go
index 0aa4c9c94..f476afa21 100644
--- a/daemon/algod/api/server/v2/utils.go
+++ b/daemon/algod/api/server/v2/utils.go
@@ -26,6 +26,7 @@ import (
"github.com/algorand/go-codec/codec"
"github.com/labstack/echo/v4"
+ "golang.org/x/exp/slices"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
@@ -74,18 +75,13 @@ func addrOrNil(addr basics.Address) *string {
return &ret
}
-func strOrNil(str string) *string {
- if str == "" {
+// omitEmpty defines a handy impl for all comparable types to convert from default value to nil ptr
+func omitEmpty[T comparable](val T) *T {
+ var defaultVal T
+ if val == defaultVal {
return nil
}
- return &str
-}
-
-func numOrNil(num uint64) *uint64 {
- if num == 0 {
- return nil
- }
- return &num
+ return &val
}
func byteOrNil(data []byte) *[]byte {
@@ -95,13 +91,6 @@ func byteOrNil(data []byte) *[]byte {
return &data
}
-func trueOrNil(b bool) *bool {
- if !b {
- return nil
- }
- return &b
-}
-
func nilToZero(numPtr *uint64) uint64 {
if numPtr == nil {
return 0
@@ -111,13 +100,10 @@ func nilToZero(numPtr *uint64) uint64 {
func computeCreatableIndexInPayset(tx node.TxnWithStatus, txnCounter uint64, payset []transactions.SignedTxnWithAD) (cidx *uint64) {
// Compute transaction index in block
- offset := -1
- for idx, stxnib := range payset {
- if tx.Txn.Txn.ID() == stxnib.Txn.ID() {
- offset = idx
- break
- }
- }
+ txID := tx.Txn.Txn.ID()
+ offset := slices.IndexFunc(payset, func(ad transactions.SignedTxnWithAD) bool {
+ return ad.Txn.ID() == txID
+ })
// Sanity check that txn was in fetched block
if offset < 0 {
@@ -268,8 +254,8 @@ func stateDeltaToStateDelta(d basics.StateDelta) *model.StateDelta {
Key: base64.StdEncoding.EncodeToString([]byte(k)),
Value: model.EvalDelta{
Action: uint64(v.Action),
- Bytes: strOrNil(base64.StdEncoding.EncodeToString([]byte(v.Bytes))),
- Uint: numOrNil(v.Uint),
+ Bytes: omitEmpty(base64.StdEncoding.EncodeToString([]byte(v.Bytes))),
+ Uint: omitEmpty(v.Uint),
},
})
}
@@ -347,8 +333,8 @@ func ConvertInnerTxn(txn *transactions.SignedTxnWithAD) PreEncodedTxInfo {
// Since this is an inner txn, we know these indexes will be populated. No
// need to search payset for IDs
- response.AssetIndex = numOrNil(uint64(txn.ApplyData.ConfigAsset))
- response.ApplicationIndex = numOrNil(uint64(txn.ApplyData.ApplicationID))
+ response.AssetIndex = omitEmpty(uint64(txn.ApplyData.ConfigAsset))
+ response.ApplicationIndex = omitEmpty(uint64(txn.ApplyData.ApplicationID))
withStatus := node.TxnWithStatus{
Txn: txn.SignedTxn,
@@ -360,11 +346,92 @@ func ConvertInnerTxn(txn *transactions.SignedTxnWithAD) PreEncodedTxInfo {
return response
}
+func convertScratchChanges(scratchChanges []simulation.ScratchChange) *[]model.ScratchChange {
+ if len(scratchChanges) == 0 {
+ return nil
+ }
+ modelSC := make([]model.ScratchChange, len(scratchChanges))
+ for i, scratchChange := range scratchChanges {
+ modelSC[i] = model.ScratchChange{
+ Slot: scratchChange.Slot,
+ NewValue: model.AvmValue{
+ Type: uint64(scratchChange.NewValue.Type),
+ Uint: omitEmpty(scratchChange.NewValue.Uint),
+ Bytes: byteOrNil([]byte(scratchChange.NewValue.Bytes)),
+ },
+ }
+ }
+ return &modelSC
+}
+
+func convertTealValueSliceToModel(tvs []basics.TealValue) *[]model.AvmValue {
+ if len(tvs) == 0 {
+ return nil
+ }
+ modelTvs := make([]model.AvmValue, len(tvs))
+ for i := range tvs {
+ modelTvs[i] = model.AvmValue{
+ Type: uint64(tvs[i].Type),
+ Uint: omitEmpty(tvs[i].Uint),
+ Bytes: byteOrNil([]byte(tvs[i].Bytes)),
+ }
+ }
+ return &modelTvs
+}
+
+func convertProgramTrace(programTrace []simulation.OpcodeTraceUnit) *[]model.SimulationOpcodeTraceUnit {
+ if len(programTrace) == 0 {
+ return nil
+ }
+ modelProgramTrace := make([]model.SimulationOpcodeTraceUnit, len(programTrace))
+ for i := range programTrace {
+ var spawnedInnersPtr *[]uint64
+ if len(programTrace[i].SpawnedInners) > 0 {
+ spawnedInners := make([]uint64, len(programTrace[i].SpawnedInners))
+ for j, innerIndex := range programTrace[i].SpawnedInners {
+ spawnedInners[j] = uint64(innerIndex)
+ }
+ spawnedInnersPtr = &spawnedInners
+ }
+ modelProgramTrace[i] = model.SimulationOpcodeTraceUnit{
+ Pc: programTrace[i].PC,
+ SpawnedInners: spawnedInnersPtr,
+ StackAdditions: convertTealValueSliceToModel(programTrace[i].StackAdded),
+ StackPopCount: omitEmpty(programTrace[i].StackPopCount),
+ ScratchChanges: convertScratchChanges(programTrace[i].ScratchSlotChanges),
+ }
+ }
+ return &modelProgramTrace
+}
+
+func convertTxnTrace(txnTrace *simulation.TransactionTrace) *model.SimulationTransactionExecTrace {
+ if txnTrace == nil {
+ return nil
+ }
+
+ execTraceModel := model.SimulationTransactionExecTrace{
+ ApprovalProgramTrace: convertProgramTrace(txnTrace.ApprovalProgramTrace),
+ ClearStateProgramTrace: convertProgramTrace(txnTrace.ClearStateProgramTrace),
+ LogicSigTrace: convertProgramTrace(txnTrace.LogicSigTrace),
+ }
+
+ if len(txnTrace.InnerTraces) > 0 {
+ innerTraces := make([]model.SimulationTransactionExecTrace, len(txnTrace.InnerTraces))
+ for i := range txnTrace.InnerTraces {
+ innerTraces[i] = *convertTxnTrace(&txnTrace.InnerTraces[i])
+ }
+ execTraceModel.InnerTrace = &innerTraces
+ }
+
+ return &execTraceModel
+}
+
func convertTxnResult(txnResult simulation.TxnResult) PreEncodedSimulateTxnResult {
return PreEncodedSimulateTxnResult{
Txn: ConvertInnerTxn(&txnResult.Txn),
- AppBudgetConsumed: numOrNil(txnResult.AppBudgetConsumed),
- LogicSigBudgetConsumed: numOrNil(txnResult.LogicSigBudgetConsumed),
+ AppBudgetConsumed: omitEmpty(txnResult.AppBudgetConsumed),
+ LogicSigBudgetConsumed: omitEmpty(txnResult.LogicSigBudgetConsumed),
+ TransactionTrace: convertTxnTrace(txnResult.Trace),
}
}
@@ -376,14 +443,13 @@ func convertTxnGroupResult(txnGroupResult simulation.TxnGroupResult) PreEncodedS
encoded := PreEncodedSimulateTxnGroupResult{
Txns: txnResults,
- FailureMessage: strOrNil(txnGroupResult.FailureMessage),
- AppBudgetAdded: numOrNil(txnGroupResult.AppBudgetAdded),
- AppBudgetConsumed: numOrNil(txnGroupResult.AppBudgetConsumed),
+ FailureMessage: omitEmpty(txnGroupResult.FailureMessage),
+ AppBudgetAdded: omitEmpty(txnGroupResult.AppBudgetAdded),
+ AppBudgetConsumed: omitEmpty(txnGroupResult.AppBudgetConsumed),
}
if len(txnGroupResult.FailedAt) > 0 {
- failedAt := make([]uint64, len(txnGroupResult.FailedAt))
- copy(failedAt, txnGroupResult.FailedAt)
+ failedAt := slices.Clone[[]uint64, uint64](txnGroupResult.FailedAt)
encoded.FailedAt = &failedAt
}
@@ -394,18 +460,19 @@ func convertSimulationResult(result simulation.Result) PreEncodedSimulateRespons
var evalOverrides *model.SimulationEvalOverrides
if result.EvalOverrides != (simulation.ResultEvalOverrides{}) {
evalOverrides = &model.SimulationEvalOverrides{
- AllowEmptySignatures: trueOrNil(result.EvalOverrides.AllowEmptySignatures),
+ AllowEmptySignatures: omitEmpty(result.EvalOverrides.AllowEmptySignatures),
MaxLogSize: result.EvalOverrides.MaxLogSize,
MaxLogCalls: result.EvalOverrides.MaxLogCalls,
- ExtraOpcodeBudget: numOrNil(result.EvalOverrides.ExtraOpcodeBudget),
+ ExtraOpcodeBudget: omitEmpty(result.EvalOverrides.ExtraOpcodeBudget),
}
}
encodedSimulationResult := PreEncodedSimulateResponse{
- Version: result.Version,
- LastRound: uint64(result.LastRound),
- TxnGroups: make([]PreEncodedSimulateTxnGroupResult, len(result.TxnGroups)),
- EvalOverrides: evalOverrides,
+ Version: result.Version,
+ LastRound: uint64(result.LastRound),
+ TxnGroups: make([]PreEncodedSimulateTxnGroupResult, len(result.TxnGroups)),
+ EvalOverrides: evalOverrides,
+ ExecTraceConfig: result.TraceConfig,
}
for i, txnGroup := range result.TxnGroups {
@@ -425,6 +492,7 @@ func convertSimulationRequest(request PreEncodedSimulateRequest) simulation.Requ
AllowEmptySignatures: request.AllowEmptySignatures,
AllowMoreLogging: request.AllowMoreLogging,
ExtraOpcodeBudget: request.ExtraOpcodeBudget,
+ TraceConfig: request.ExecTraceConfig,
}
}
diff --git a/daemon/algod/api/spec/v2/msgp_gen.go b/daemon/algod/api/spec/v2/msgp_gen.go
index b14d47b79..140399c19 100644
--- a/daemon/algod/api/spec/v2/msgp_gen.go
+++ b/daemon/algod/api/spec/v2/msgp_gen.go
@@ -16,6 +16,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AccountApplicationModelMaxSize()
//
// AccountAssetModel
// |-----> (*) MarshalMsg
@@ -24,6 +25,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AccountAssetModelMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -218,6 +220,15 @@ func (z *AccountApplicationModel) MsgIsZero() bool {
return ((*z).AppLocalState == nil) && ((*z).AppParams == nil)
}
+// MaxSize returns a maximum valid message size for this message type
+func AccountApplicationModelMaxSize() (s int) {
+ s = 1 + 16
+ s += basics.AppLocalStateMaxSize()
+ s += 11
+ s += basics.AppParamsMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *AccountAssetModel) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -409,3 +420,12 @@ func (z *AccountAssetModel) Msgsize() (s int) {
func (z *AccountAssetModel) MsgIsZero() bool {
return ((*z).AssetParams == nil) && ((*z).AssetHolding == nil)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func AccountAssetModelMaxSize() (s int) {
+ s = 1 + 13
+ s += basics.AssetParamsMaxSize()
+ s += 14
+ s += basics.AssetHoldingMaxSize()
+ return
+}
diff --git a/daemon/algod/server.go b/daemon/algod/server.go
index 010d3aec5..9f702a038 100644
--- a/daemon/algod/server.go
+++ b/daemon/algod/server.go
@@ -132,13 +132,13 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes
}
if cfg.IsGossipServer() {
var ot basics.OverflowTracker
- fdRequired := ot.Add(fdRequired, uint64(cfg.IncomingConnectionsLimit))
+ fdRequired = ot.Add(fdRequired, uint64(cfg.IncomingConnectionsLimit))
if ot.Overflowed {
return errors.New("Initialize() overflowed when adding up IncomingConnectionsLimit to the existing RLIMIT_NOFILE value; decrease RestConnectionsHardLimit or IncomingConnectionsLimit")
}
- _, hard, err := util.GetFdLimits()
- if err != nil {
- s.log.Errorf("Failed to get RLIMIT_NOFILE values: %s", err.Error())
+ _, hard, fdErr := util.GetFdLimits()
+ if fdErr != nil {
+ s.log.Errorf("Failed to get RLIMIT_NOFILE values: %s", fdErr.Error())
} else {
maxFDs := fdRequired
if fdRequired > hard {
@@ -157,10 +157,10 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes
}
}
}
- err = util.SetFdSoftLimit(maxFDs)
- if err != nil {
+ fdErr = util.SetFdSoftLimit(maxFDs)
+ if fdErr != nil {
// do not fail but log the error
- s.log.Errorf("Failed to set a new RLIMIT_NOFILE value to %d (max %d): %s", fdRequired, hard, err.Error())
+ s.log.Errorf("Failed to set a new RLIMIT_NOFILE value to %d (max %d): %s", fdRequired, hard, fdErr.Error())
}
}
}
diff --git a/daemon/kmd/api/api.go b/daemon/kmd/api/api.go
index e27c97480..90c374482 100644
--- a/daemon/kmd/api/api.go
+++ b/daemon/kmd/api/api.go
@@ -18,35 +18,35 @@
//
// API for KMD (Key Management Daemon)
//
-// Schemes: http
-// Host: localhost
-// BasePath: /
-// Version: 0.0.1
-// License:
-// Contact: contact@algorand.com
+// Schemes: http
+// Host: localhost
+// BasePath: /
+// Version: 0.0.1
+// License:
+// Contact: contact@algorand.com
//
-// Consumes:
-// - application/json
+// Consumes:
+// - application/json
//
-// Produces:
-// - application/json
+// Produces:
+// - application/json
//
-// Security:
-// - api_key:
+// Security:
+// - api_key:
//
-// SecurityDefinitions:
-// api_key:
-// type: apiKey
-// name: X-KMD-API-Token
-// in: header
-// description: >-
-// Generated header parameter. This value can be found in `/kmd/data/dir/kmd.token`. Example value:
-// '330b2e4fc9b20f4f89812cf87f1dabeb716d23e3f11aec97a61ff5f750563b78'
-// required: true
-// x-example: 330b2e4fc9b20f4f89812cf87f1dabeb716d23e3f11aec97a61ff5f750563b78
+// SecurityDefinitions:
+// api_key:
+// type: apiKey
+// name: X-KMD-API-Token
+// in: header
+// description: >-
+// Generated header parameter. This value can be found in `/kmd/data/dir/kmd.token`. Example value:
+// '330b2e4fc9b20f4f89812cf87f1dabeb716d23e3f11aec97a61ff5f750563b78'
+// required: true
+// x-example: 330b2e4fc9b20f4f89812cf87f1dabeb716d23e3f11aec97a61ff5f750563b78
//
// swagger:meta
-//---
+// ---
// IF YOU MODIFY SUBPACKAGES: IMPORTANT
// MAKE SURE YOU REGENERATE THE SWAGGER SPEC (using go:generate)
// MAKE SURE IT VALIDATES
@@ -58,6 +58,7 @@
// Autogenerate the swagger json.
// Base path must be a fully specified package name (else, it seems that swagger feeds a relative path to
// loader.Config.Import(), and that breaks the vendor directory if the source is symlinked from elsewhere)
+//
//go:generate swagger generate spec -m -o="./swagger.json"
//go:generate swagger validate ./swagger.json --stop-on-error
//go:generate sh ../lib/kmdapi/bundle_swagger_json.sh
diff --git a/daemon/kmd/session/auth.go b/daemon/kmd/session/auth.go
index ecf2db88c..29aa5acc2 100644
--- a/daemon/kmd/session/auth.go
+++ b/daemon/kmd/session/auth.go
@@ -52,15 +52,12 @@ func validateHandleSecret(handleSecret []byte) error {
}
func splitHandle(walletHandle []byte) ([]byte, []byte, error) {
- split := bytes.SplitN(walletHandle, wHandleTokenSplitChar, 2)
+ handleID, handleSecret, found := bytes.Cut(walletHandle, wHandleTokenSplitChar)
- if len(split) != 2 {
+ if !found {
return nil, nil, fmt.Errorf("wrong number of token parts")
}
- handleID := split[0]
- handleSecret := split[1]
-
err := validateHandleID(handleID)
if err != nil {
return nil, nil, err
diff --git a/data/account/msgp_gen.go b/data/account/msgp_gen.go
index 96934260c..3bef76693 100644
--- a/data/account/msgp_gen.go
+++ b/data/account/msgp_gen.go
@@ -4,6 +4,10 @@ package account
import (
"github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/data/basics"
)
// The following msgp objects are implemented in this file:
@@ -14,6 +18,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ParticipationKeyIdentityMaxSize()
//
// StateProofKeys
// |-----> MarshalMsg
@@ -22,6 +27,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> StateProofKeysMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -245,6 +251,12 @@ func (z *ParticipationKeyIdentity) MsgIsZero() bool {
return ((*z).Parent.MsgIsZero()) && ((*z).VRFSK.MsgIsZero()) && ((*z).VoteID.MsgIsZero()) && ((*z).FirstValid.MsgIsZero()) && ((*z).LastValid.MsgIsZero()) && ((*z).KeyDilution == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func ParticipationKeyIdentityMaxSize() (s int) {
+ s = 1 + 5 + basics.AddressMaxSize() + 6 + crypto.VrfPrivkeyMaxSize() + 8 + crypto.OneTimeSignatureVerifierMaxSize() + 3 + basics.RoundMaxSize() + 3 + basics.RoundMaxSize() + 3 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z StateProofKeys) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -317,3 +329,10 @@ func (z StateProofKeys) Msgsize() (s int) {
func (z StateProofKeys) MsgIsZero() bool {
return len(z) == 0
}
+
+// MaxSize returns a maximum valid message size for this message type
+func StateProofKeysMaxSize() (s int) {
+ // Calculating size of slice: z
+ s += msgp.ArrayHeaderSize + ((1000) * (merklesignature.KeyRoundPairMaxSize()))
+ return
+}
diff --git a/data/account/participation.go b/data/account/participation.go
index 0e1d4564a..376e9090f 100644
--- a/data/account/participation.go
+++ b/data/account/participation.go
@@ -42,6 +42,7 @@ import (
// For correctness, all Roots should have no more than one Participation
// globally active at any time. If this condition is violated, the Root may
// equivocate. (Algorand tolerates a limited fraction of misbehaving accounts.)
+//
//msgp:ignore Participation
type Participation struct {
Parent basics.Address
@@ -103,6 +104,7 @@ func (part Participation) ID() ParticipationID {
// PersistedParticipation encapsulates the static state of the participation
// for a single address at any given moment, while providing the ability
// to handle persistence and deletion of secrets.
+//
//msgp:ignore PersistedParticipation
type PersistedParticipation struct {
Participation
diff --git a/data/account/participationRegistry.go b/data/account/participationRegistry.go
index bc9d0b301..3add9ec5b 100644
--- a/data/account/participationRegistry.go
+++ b/data/account/participationRegistry.go
@@ -37,6 +37,7 @@ import (
const defaultTimeout = 5 * time.Second
// ParticipationID identifies a particular set of participation keys.
+//
//msgp:ignore ParticipationID
type ParticipationID crypto.Digest
@@ -180,6 +181,7 @@ func (r ParticipationRecord) OverlapsInterval(first, last basics.Round) bool {
}
// ParticipationAction is used when recording participation actions.
+//
//msgp:ignore ParticipationAction
type ParticipationAction int
@@ -780,18 +782,18 @@ func (db *participationDB) GetStateProofSecretsForRound(id ParticipationID, roun
var rawStateProofKey []byte
err = db.store.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
// fetch secret key
- keyFirstValidRound, err := partRecord.StateProof.FirstRoundInKeyLifetime(uint64(round))
- if err != nil {
- return err
+ keyFirstValidRound, err2 := partRecord.StateProof.FirstRoundInKeyLifetime(uint64(round))
+ if err2 != nil {
+ return err2
}
row := tx.QueryRow(selectStateProofKey, keyFirstValidRound, id[:])
- err = row.Scan(&rawStateProofKey)
- if err == sql.ErrNoRows {
+ err2 = row.Scan(&rawStateProofKey)
+ if err2 == sql.ErrNoRows {
return ErrSecretNotFound
}
- if err != nil {
- return fmt.Errorf("error while querying secrets: %w", err)
+ if err2 != nil {
+ return fmt.Errorf("error while querying secrets: %w", err2)
}
return nil
@@ -814,9 +816,9 @@ func (db *participationDB) GetStateProofSecretsForRound(id ParticipationID, roun
err = db.store.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
// fetch stateproof public data
row := tx.QueryRow(selectStateProofData, id[:])
- err := row.Scan(&rawSignerContext)
- if err != nil {
- return fmt.Errorf("error while querying stateproof data: %w", err)
+ err2 := row.Scan(&rawSignerContext)
+ if err2 != nil {
+ return fmt.Errorf("error while querying stateproof data: %w", err2)
}
return nil
})
@@ -885,10 +887,11 @@ func recordActive(record ParticipationRecord, on basics.Round) bool {
}
// PKI TODO: Register needs a bit more work to make sure EffectiveFirst and
-// EffectiveLast are set at the right time. Specifically, the node
-// doesn't call Register until the key becomes active and is about
-// to be used, so effective first/last is updated just-in-time. It
-// would be better to update them when the KeyRegistration occurs.
+//
+// EffectiveLast are set at the right time. Specifically, the node
+// doesn't call Register until the key becomes active and is about
+// to be used, so effective first/last is updated just-in-time. It
+// would be better to update them when the KeyRegistration occurs.
func (db *participationDB) Register(id ParticipationID, on basics.Round) error {
// Lookup recordToRegister for first/last valid and account.
recordToRegister := db.Get(id)
diff --git a/data/account/registeryDbOps.go b/data/account/registeryDbOps.go
index e6722e5eb..ad5174c1c 100644
--- a/data/account/registeryDbOps.go
+++ b/data/account/registeryDbOps.go
@@ -150,7 +150,7 @@ func (i *insertOp) apply(db *participationDB) (err error) {
}
err = db.store.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- result, err := tx.Exec(
+ result, err2 := tx.Exec(
insertKeysetQuery,
i.id[:],
i.record.Parent[:],
@@ -159,17 +159,17 @@ func (i *insertOp) apply(db *participationDB) (err error) {
i.record.KeyDilution,
rawVRF,
rawStateProofContext)
- if err = verifyExecWithOneRowEffected(err, result, "insert keyset"); err != nil {
- return err
+ if err2 = verifyExecWithOneRowEffected(err2, result, "insert keyset"); err2 != nil {
+ return err2
}
- pk, err := result.LastInsertId()
- if err != nil {
- return fmt.Errorf("unable to get pk from keyset: %w", err)
+ pk, err2 := result.LastInsertId()
+ if err2 != nil {
+ return fmt.Errorf("unable to get pk from keyset: %w", err2)
}
// Create Rolling entry
- result, err = tx.Exec(insertRollingQuery, pk, rawVoting)
- return verifyExecWithOneRowEffected(err, result, "insert rolling")
+ result, err2 = tx.Exec(insertRollingQuery, pk, rawVoting)
+ return verifyExecWithOneRowEffected(err2, result, "insert rolling")
})
return err
}
diff --git a/data/basics/msgp_gen.go b/data/basics/msgp_gen.go
index c39ca5833..274b6c2b1 100644
--- a/data/basics/msgp_gen.go
+++ b/data/basics/msgp_gen.go
@@ -9,6 +9,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
)
// The following msgp objects are implemented in this file:
@@ -19,6 +20,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AccountDataMaxSize()
//
// Address
// |-----> (*) MarshalMsg
@@ -35,6 +37,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> AppIndexMaxSize()
//
// AppLocalState
// |-----> (*) MarshalMsg
@@ -43,6 +46,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AppLocalStateMaxSize()
//
// AppParams
// |-----> (*) MarshalMsg
@@ -51,6 +55,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AppParamsMaxSize()
//
// AssetHolding
// |-----> (*) MarshalMsg
@@ -59,6 +64,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AssetHoldingMaxSize()
//
// AssetIndex
// |-----> MarshalMsg
@@ -67,6 +73,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> AssetIndexMaxSize()
//
// AssetParams
// |-----> (*) MarshalMsg
@@ -75,6 +82,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AssetParamsMaxSize()
//
// BalanceRecord
// |-----> (*) MarshalMsg
@@ -83,6 +91,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> BalanceRecordMaxSize()
//
// CreatableIndex
// |-----> MarshalMsg
@@ -91,6 +100,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> CreatableIndexMaxSize()
//
// CreatableType
// |-----> MarshalMsg
@@ -99,6 +109,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> CreatableTypeMaxSize()
//
// DeltaAction
// |-----> MarshalMsg
@@ -107,6 +118,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> DeltaActionMaxSize()
//
// Participant
// |-----> (*) MarshalMsg
@@ -115,6 +127,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ParticipantMaxSize()
//
// Round
// |-----> MarshalMsg
@@ -123,6 +136,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> RoundMaxSize()
//
// RoundInterval
// |-----> MarshalMsg
@@ -131,6 +145,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> RoundIntervalMaxSize()
//
// StateDelta
// |-----> MarshalMsg
@@ -139,6 +154,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> StateDeltaMaxSize()
//
// StateSchema
// |-----> (*) MarshalMsg
@@ -147,6 +163,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> StateSchemaMaxSize()
//
// StateSchemas
// |-----> (*) MarshalMsg
@@ -155,6 +172,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> StateSchemasMaxSize()
//
// Status
// |-----> MarshalMsg
@@ -163,6 +181,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> StatusMaxSize()
//
// TealKeyValue
// |-----> MarshalMsg
@@ -171,6 +190,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> TealKeyValueMaxSize()
//
// TealType
// |-----> MarshalMsg
@@ -179,6 +199,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> TealTypeMaxSize()
//
// TealValue
// |-----> (*) MarshalMsg
@@ -187,6 +208,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> TealValueMaxSize()
//
// ValueDelta
// |-----> (*) MarshalMsg
@@ -195,6 +217,10 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ValueDeltaMaxSize()
+//
+// crypto.Digest
+// |-----> crypto.DigestMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -1359,6 +1385,37 @@ func (z *AccountData) MsgIsZero() bool {
return ((*z).Status == 0) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).VoteID.MsgIsZero()) && ((*z).SelectionID.MsgIsZero()) && ((*z).StateProofID.MsgIsZero()) && ((*z).VoteFirstValid == 0) && ((*z).VoteLastValid == 0) && ((*z).VoteKeyDilution == 0) && (len((*z).AssetParams) == 0) && (len((*z).Assets) == 0) && ((*z).AuthAddr.MsgIsZero()) && (len((*z).AppLocalStates) == 0) && (len((*z).AppParams) == 0) && (((*z).TotalAppSchema.NumUint == 0) && ((*z).TotalAppSchema.NumByteSlice == 0)) && ((*z).TotalExtraAppPages == 0) && ((*z).TotalBoxes == 0) && ((*z).TotalBoxBytes == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func AccountDataMaxSize() (s int) {
+ s = 3 + 4 + msgp.ByteSize + 5 + MicroAlgosMaxSize() + 6 + msgp.Uint64Size + 4 + MicroAlgosMaxSize() + 5 + crypto.OneTimeSignatureVerifierMaxSize() + 4 + crypto.VRFVerifierMaxSize() + 6 + merklesignature.CommitmentMaxSize() + 8 + msgp.Uint64Size + 8 + msgp.Uint64Size + 7 + msgp.Uint64Size + 5
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.AssetParams
+ s += encodedMaxAssetsPerAccount * (AssetIndexMaxSize())
+ // Adding size of map values for z.AssetParams
+ s += encodedMaxAssetsPerAccount * (AssetParamsMaxSize())
+ s += 6
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.Assets
+ s += encodedMaxAssetsPerAccount * (AssetIndexMaxSize())
+ // Adding size of map values for z.Assets
+ s += encodedMaxAssetsPerAccount * (1)
+ s += 2 + msgp.Uint64Size + 2 + msgp.BoolSize
+ s += 6 + AddressMaxSize() + 5
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.AppLocalStates
+ s += EncodedMaxAppLocalStates * (AppIndexMaxSize())
+ // Adding size of map values for z.AppLocalStates
+ s += EncodedMaxAppLocalStates * (AppLocalStateMaxSize())
+ s += 5
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.AppParams
+ s += EncodedMaxAppParams * (AppIndexMaxSize())
+ // Adding size of map values for z.AppParams
+ s += EncodedMaxAppParams * (AppParamsMaxSize())
+ s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Address) MarshalMsg(b []byte) []byte {
return ((*(crypto.Digest))(z)).MarshalMsg(b)
@@ -1387,6 +1444,11 @@ func (z *Address) MsgIsZero() bool {
return ((*(crypto.Digest))(z)).MsgIsZero()
}
+// MaxSize returns a maximum valid message size for this message type
+func AddressMaxSize() int {
+ return crypto.DigestMaxSize()
+}
+
// MarshalMsg implements msgp.Marshaler
func (z AppIndex) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1433,6 +1495,12 @@ func (z AppIndex) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func AppIndexMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *AppLocalState) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1790,6 +1858,17 @@ func (z *AppLocalState) MsgIsZero() bool {
return (((*z).Schema.NumUint == 0) && ((*z).Schema.NumByteSlice == 0)) && (len((*z).KeyValue) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func AppLocalStateMaxSize() (s int) {
+ s = 1 + 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.KeyValue
+ s += EncodedMaxKeyValueEntries * (msgp.StringPrefixSize + config.MaxAppBytesKeyLen)
+ // Adding size of map values for z.KeyValue
+ s += EncodedMaxKeyValueEntries * (TealValueMaxSize())
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *AppParams) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2430,6 +2509,18 @@ func (z *AppParams) MsgIsZero() bool {
return (len((*z).ApprovalProgram) == 0) && (len((*z).ClearStateProgram) == 0) && (len((*z).GlobalState) == 0) && (((*z).StateSchemas.LocalStateSchema.NumUint == 0) && ((*z).StateSchemas.LocalStateSchema.NumByteSlice == 0)) && (((*z).StateSchemas.GlobalStateSchema.NumUint == 0) && ((*z).StateSchemas.GlobalStateSchema.NumByteSlice == 0)) && ((*z).ExtraProgramPages == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func AppParamsMaxSize() (s int) {
+ s = 1 + 7 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 7 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 3
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.GlobalState
+ s += EncodedMaxKeyValueEntries * (msgp.StringPrefixSize + config.MaxAppBytesKeyLen)
+ // Adding size of map values for z.GlobalState
+ s += EncodedMaxKeyValueEntries * (TealValueMaxSize())
+ s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.Uint32Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *AssetHolding) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2559,6 +2650,12 @@ func (z *AssetHolding) MsgIsZero() bool {
return ((*z).Amount == 0) && ((*z).Frozen == false)
}
+// MaxSize returns a maximum valid message size for this message type
+func AssetHoldingMaxSize() (s int) {
+ s = 1 + 2 + msgp.Uint64Size + 2 + msgp.BoolSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z AssetIndex) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2605,6 +2702,12 @@ func (z AssetIndex) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func AssetIndexMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *AssetParams) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2761,6 +2864,16 @@ func (z *AssetParams) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0002 > 0 {
zb0002--
+ var zb0004 int
+ zb0004, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "UnitName")
+ return
+ }
+ if zb0004 > config.MaxAssetUnitNameBytes {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(config.MaxAssetUnitNameBytes))
+ return
+ }
(*z).UnitName, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "UnitName")
@@ -2769,6 +2882,16 @@ func (z *AssetParams) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0002 > 0 {
zb0002--
+ var zb0005 int
+ zb0005, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AssetName")
+ return
+ }
+ if zb0005 > config.MaxAssetNameBytes {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(config.MaxAssetNameBytes))
+ return
+ }
(*z).AssetName, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetName")
@@ -2777,6 +2900,16 @@ func (z *AssetParams) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0002 > 0 {
zb0002--
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "URL")
+ return
+ }
+ if zb0006 > config.MaxAssetURLBytes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxAssetURLBytes))
+ return
+ }
(*z).URL, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "URL")
@@ -2865,18 +2998,48 @@ func (z *AssetParams) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "un":
+ var zb0007 int
+ zb0007, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UnitName")
+ return
+ }
+ if zb0007 > config.MaxAssetUnitNameBytes {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxAssetUnitNameBytes))
+ return
+ }
(*z).UnitName, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UnitName")
return
}
case "an":
+ var zb0008 int
+ zb0008, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AssetName")
+ return
+ }
+ if zb0008 > config.MaxAssetNameBytes {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxAssetNameBytes))
+ return
+ }
(*z).AssetName, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AssetName")
return
}
case "au":
+ var zb0009 int
+ zb0009, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "URL")
+ return
+ }
+ if zb0009 > config.MaxAssetURLBytes {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxAssetURLBytes))
+ return
+ }
(*z).URL, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "URL")
@@ -2941,6 +3104,15 @@ func (z *AssetParams) MsgIsZero() bool {
return ((*z).Total == 0) && ((*z).Decimals == 0) && ((*z).DefaultFrozen == false) && ((*z).UnitName == "") && ((*z).AssetName == "") && ((*z).URL == "") && ((*z).MetadataHash == ([32]byte{})) && ((*z).Manager.MsgIsZero()) && ((*z).Reserve.MsgIsZero()) && ((*z).Freeze.MsgIsZero()) && ((*z).Clawback.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func AssetParamsMaxSize() (s int) {
+ s = 1 + 2 + msgp.Uint64Size + 3 + msgp.Uint32Size + 3 + msgp.BoolSize + 3 + msgp.StringPrefixSize + config.MaxAssetUnitNameBytes + 3 + msgp.StringPrefixSize + config.MaxAssetNameBytes + 3 + msgp.StringPrefixSize + config.MaxAssetURLBytes + 3
+ // Calculating size of array: z.MetadataHash
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 2 + AddressMaxSize() + 2 + AddressMaxSize() + 2 + AddressMaxSize() + 2 + AddressMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *BalanceRecord) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4126,6 +4298,37 @@ func (z *BalanceRecord) MsgIsZero() bool {
return ((*z).Addr.MsgIsZero()) && ((*z).AccountData.Status == 0) && ((*z).AccountData.MicroAlgos.MsgIsZero()) && ((*z).AccountData.RewardsBase == 0) && ((*z).AccountData.RewardedMicroAlgos.MsgIsZero()) && ((*z).AccountData.VoteID.MsgIsZero()) && ((*z).AccountData.SelectionID.MsgIsZero()) && ((*z).AccountData.StateProofID.MsgIsZero()) && ((*z).AccountData.VoteFirstValid == 0) && ((*z).AccountData.VoteLastValid == 0) && ((*z).AccountData.VoteKeyDilution == 0) && (len((*z).AccountData.AssetParams) == 0) && (len((*z).AccountData.Assets) == 0) && ((*z).AccountData.AuthAddr.MsgIsZero()) && (len((*z).AccountData.AppLocalStates) == 0) && (len((*z).AccountData.AppParams) == 0) && (((*z).AccountData.TotalAppSchema.NumUint == 0) && ((*z).AccountData.TotalAppSchema.NumByteSlice == 0)) && ((*z).AccountData.TotalExtraAppPages == 0) && ((*z).AccountData.TotalBoxes == 0) && ((*z).AccountData.TotalBoxBytes == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func BalanceRecordMaxSize() (s int) {
+ s = 3 + 5 + AddressMaxSize() + 4 + msgp.ByteSize + 5 + MicroAlgosMaxSize() + 6 + msgp.Uint64Size + 4 + MicroAlgosMaxSize() + 5 + crypto.OneTimeSignatureVerifierMaxSize() + 4 + crypto.VRFVerifierMaxSize() + 6 + merklesignature.CommitmentMaxSize() + 8 + msgp.Uint64Size + 8 + msgp.Uint64Size + 7 + msgp.Uint64Size + 5
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.AccountData.AssetParams
+ s += encodedMaxAssetsPerAccount * (AssetIndexMaxSize())
+ // Adding size of map values for z.AccountData.AssetParams
+ s += encodedMaxAssetsPerAccount * (AssetParamsMaxSize())
+ s += 6
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.AccountData.Assets
+ s += encodedMaxAssetsPerAccount * (AssetIndexMaxSize())
+ // Adding size of map values for z.AccountData.Assets
+ s += encodedMaxAssetsPerAccount * (1)
+ s += 2 + msgp.Uint64Size + 2 + msgp.BoolSize
+ s += 6 + AddressMaxSize() + 5
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.AccountData.AppLocalStates
+ s += EncodedMaxAppLocalStates * (AppIndexMaxSize())
+ // Adding size of map values for z.AccountData.AppLocalStates
+ s += EncodedMaxAppLocalStates * (AppLocalStateMaxSize())
+ s += 5
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.AccountData.AppParams
+ s += EncodedMaxAppParams * (AppIndexMaxSize())
+ // Adding size of map values for z.AccountData.AppParams
+ s += EncodedMaxAppParams * (AppParamsMaxSize())
+ s += 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + msgp.Uint32Size + 4 + msgp.Uint64Size + 5 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z CreatableIndex) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4172,6 +4375,12 @@ func (z CreatableIndex) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func CreatableIndexMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z CreatableType) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4218,6 +4427,12 @@ func (z CreatableType) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func CreatableTypeMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z DeltaAction) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4264,6 +4479,12 @@ func (z DeltaAction) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func DeltaActionMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Participant) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4393,6 +4614,12 @@ func (z *Participant) MsgIsZero() bool {
return ((*z).PK.MsgIsZero()) && ((*z).Weight == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func ParticipantMaxSize() (s int) {
+ s = 1 + 2 + merklesignature.VerifierMaxSize() + 2 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z Round) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4439,6 +4666,12 @@ func (z Round) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func RoundMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z RoundInterval) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4485,6 +4718,12 @@ func (z RoundInterval) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func RoundIntervalMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z StateDelta) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4577,6 +4816,16 @@ func (z StateDelta) MsgIsZero() bool {
return len(z) == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func StateDeltaMaxSize() (s int) {
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z
+ s += config.MaxStateDeltaKeys * (msgp.StringPrefixSize + config.MaxAppBytesKeyLen)
+ // Adding size of map values for z
+ s += config.MaxStateDeltaKeys * (ValueDeltaMaxSize())
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *StateSchema) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4706,6 +4955,12 @@ func (z *StateSchema) MsgIsZero() bool {
return ((*z).NumUint == 0) && ((*z).NumByteSlice == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func StateSchemaMaxSize() (s int) {
+ s = 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *StateSchemas) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5135,6 +5390,12 @@ func (z *StateSchemas) MsgIsZero() bool {
return (((*z).LocalStateSchema.NumUint == 0) && ((*z).LocalStateSchema.NumByteSlice == 0)) && (((*z).GlobalStateSchema.NumUint == 0) && ((*z).GlobalStateSchema.NumByteSlice == 0))
}
+// MaxSize returns a maximum valid message size for this message type
+func StateSchemasMaxSize() (s int) {
+ s = 1 + 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size + 5 + 1 + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z Status) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5181,6 +5442,12 @@ func (z Status) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func StatusMaxSize() (s int) {
+ s = msgp.ByteSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z TealKeyValue) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5273,6 +5540,16 @@ func (z TealKeyValue) MsgIsZero() bool {
return len(z) == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func TealKeyValueMaxSize() (s int) {
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z
+ s += EncodedMaxKeyValueEntries * (msgp.StringPrefixSize + config.MaxAppBytesKeyLen)
+ // Adding size of map values for z
+ s += EncodedMaxKeyValueEntries * (TealValueMaxSize())
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z TealType) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5319,6 +5596,12 @@ func (z TealType) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func TealTypeMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *TealValue) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5479,6 +5762,14 @@ func (z *TealValue) MsgIsZero() bool {
return ((*z).Type == 0) && ((*z).Bytes == "") && ((*z).Uint == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func TealValueMaxSize() (s int) {
+ s = 1 + 3 + msgp.Uint64Size + 3
+ panic("Unable to determine max size: String type z.Bytes is unbounded")
+ s += 3 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *ValueDelta) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5551,6 +5842,16 @@ func (z *ValueDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
+ var zb0004 int
+ zb0004, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Bytes")
+ return
+ }
+ if zb0004 > config.MaxAppBytesValueLen {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(config.MaxAppBytesValueLen))
+ return
+ }
(*z).Bytes, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Bytes")
@@ -5590,15 +5891,25 @@ func (z *ValueDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
switch string(field) {
case "at":
{
- var zb0004 uint64
- zb0004, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0005 uint64
+ zb0005, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Action")
return
}
- (*z).Action = DeltaAction(zb0004)
+ (*z).Action = DeltaAction(zb0005)
}
case "bs":
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Bytes")
+ return
+ }
+ if zb0006 > config.MaxAppBytesValueLen {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxAppBytesValueLen))
+ return
+ }
(*z).Bytes, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bytes")
@@ -5638,3 +5949,9 @@ func (z *ValueDelta) Msgsize() (s int) {
func (z *ValueDelta) MsgIsZero() bool {
return ((*z).Action == 0) && ((*z).Bytes == "") && ((*z).Uint == 0)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func ValueDeltaMaxSize() (s int) {
+ s = 1 + 3 + msgp.Uint64Size + 3 + msgp.StringPrefixSize + config.MaxAppBytesValueLen + 3 + msgp.Uint64Size
+ return
+}
diff --git a/data/basics/overflow.go b/data/basics/overflow.go
index c52f99572..45ec018a7 100644
--- a/data/basics/overflow.go
+++ b/data/basics/overflow.go
@@ -17,8 +17,9 @@
package basics
import (
- "math"
"math/bits"
+
+ "golang.org/x/exp/constraints"
)
// OverflowTracker is used to track when an operation causes an overflow
@@ -26,43 +27,22 @@ type OverflowTracker struct {
Overflowed bool
}
-// OAdd16 adds 2 uint16 values with overflow detection
-func OAdd16(a uint16, b uint16) (res uint16, overflowed bool) {
- res = a + b
- overflowed = res < a
- return
-}
-
-// OAdd32 adds 2 uint32 values with overflow detection
-func OAdd32(a uint32, b uint32) (res uint32, overflowed bool) {
- res = a + b
- overflowed = res < a
- return
-}
-
// OAdd adds 2 values with overflow detection
-func OAdd(a uint64, b uint64) (res uint64, overflowed bool) {
+func OAdd[T constraints.Unsigned](a, b T) (res T, overflowed bool) {
res = a + b
overflowed = res < a
return
}
// OSub subtracts b from a with overflow detection
-func OSub(a uint64, b uint64) (res uint64, overflowed bool) {
- res = a - b
- overflowed = res > a
- return
-}
-
-// OSub32 subtracts b from a with overflow detection
-func OSub32(a uint32, b uint32) (res uint32, overflowed bool) {
+func OSub[T constraints.Unsigned](a, b T) (res T, overflowed bool) {
res = a - b
overflowed = res > a
return
}
// OMul multiplies 2 values with overflow detection
-func OMul(a uint64, b uint64) (res uint64, overflowed bool) {
+func OMul[T constraints.Unsigned](a, b T) (res T, overflowed bool) {
if b == 0 {
return 0, false
}
@@ -75,34 +55,27 @@ func OMul(a uint64, b uint64) (res uint64, overflowed bool) {
}
// MulSaturate multiplies 2 values with saturation on overflow
-func MulSaturate(a uint64, b uint64) uint64 {
+func MulSaturate[T constraints.Unsigned](a, b T) T {
res, overflowed := OMul(a, b)
if overflowed {
- return math.MaxUint64
+ var defaultT T
+ return ^defaultT
}
return res
}
// AddSaturate adds 2 values with saturation on overflow
-func AddSaturate(a uint64, b uint64) uint64 {
+func AddSaturate[T constraints.Unsigned](a, b T) T {
res, overflowed := OAdd(a, b)
if overflowed {
- return math.MaxUint64
- }
- return res
-}
-
-// AddSaturate32 adds 2 uint32 values with saturation on overflow
-func AddSaturate32(a uint32, b uint32) uint32 {
- res, overflowed := OAdd32(a, b)
- if overflowed {
- return math.MaxUint32
+ var defaultT T
+ return ^defaultT
}
return res
}
// SubSaturate subtracts 2 values with saturation on underflow
-func SubSaturate(a uint64, b uint64) uint64 {
+func SubSaturate[T constraints.Unsigned](a, b T) T {
res, overflowed := OSub(a, b)
if overflowed {
return 0
@@ -110,26 +83,8 @@ func SubSaturate(a uint64, b uint64) uint64 {
return res
}
-// SubSaturate32 subtracts 2 uint32 values with saturation on underflow
-func SubSaturate32(a uint32, b uint32) uint32 {
- res, overflowed := OSub32(a, b)
- if overflowed {
- return 0
- }
- return res
-}
-
-// Add16 adds 2 uint16 values with overflow detection
-func (t *OverflowTracker) Add16(a uint16, b uint16) uint16 {
- res, overflowed := OAdd16(a, b)
- if overflowed {
- t.Overflowed = true
- }
- return res
-}
-
// Add adds 2 values with overflow detection
-func (t *OverflowTracker) Add(a uint64, b uint64) uint64 {
+func (t *OverflowTracker) Add(a, b uint64) uint64 {
res, overflowed := OAdd(a, b)
if overflowed {
t.Overflowed = true
@@ -138,7 +93,7 @@ func (t *OverflowTracker) Add(a uint64, b uint64) uint64 {
}
// Sub subtracts b from a with overflow detection
-func (t *OverflowTracker) Sub(a uint64, b uint64) uint64 {
+func (t *OverflowTracker) Sub(a, b uint64) uint64 {
res, overflowed := OSub(a, b)
if overflowed {
t.Overflowed = true
@@ -146,8 +101,8 @@ func (t *OverflowTracker) Sub(a uint64, b uint64) uint64 {
return res
}
-// Mul multiplies b from a with overflow detection
-func (t *OverflowTracker) Mul(a uint64, b uint64) uint64 {
+// Mul multiplies b by a with overflow detection
+func (t *OverflowTracker) Mul(a, b uint64) uint64 {
res, overflowed := OMul(a, b)
if overflowed {
t.Overflowed = true
@@ -156,13 +111,13 @@ func (t *OverflowTracker) Mul(a uint64, b uint64) uint64 {
}
// OAddA adds 2 MicroAlgos values with overflow tracking
-func OAddA(a MicroAlgos, b MicroAlgos) (res MicroAlgos, overflowed bool) {
+func OAddA(a, b MicroAlgos) (res MicroAlgos, overflowed bool) {
res.Raw, overflowed = OAdd(a.Raw, b.Raw)
return
}
// OSubA subtracts b from a with overflow tracking
-func OSubA(a MicroAlgos, b MicroAlgos) (res MicroAlgos, overflowed bool) {
+func OSubA(a, b MicroAlgos) (res MicroAlgos, overflowed bool) {
res.Raw, overflowed = OSub(a.Raw, b.Raw)
return
}
@@ -173,23 +128,13 @@ func MulAIntSaturate(a MicroAlgos, b int) MicroAlgos {
}
// AddA adds 2 MicroAlgos values with overflow tracking
-func (t *OverflowTracker) AddA(a MicroAlgos, b MicroAlgos) MicroAlgos {
- return MicroAlgos{Raw: t.Add(uint64(a.Raw), uint64(b.Raw))}
+func (t *OverflowTracker) AddA(a, b MicroAlgos) MicroAlgos {
+ return MicroAlgos{Raw: t.Add(a.Raw, b.Raw)}
}
// SubA subtracts b from a with overflow tracking
-func (t *OverflowTracker) SubA(a MicroAlgos, b MicroAlgos) MicroAlgos {
- return MicroAlgos{Raw: t.Sub(uint64(a.Raw), uint64(b.Raw))}
-}
-
-// AddR adds 2 Round values with overflow tracking
-func (t *OverflowTracker) AddR(a Round, b Round) Round {
- return Round(t.Add(uint64(a), uint64(b)))
-}
-
-// SubR subtracts b from a with overflow tracking
-func (t *OverflowTracker) SubR(a Round, b Round) Round {
- return Round(t.Sub(uint64(a), uint64(b)))
+func (t *OverflowTracker) SubA(a, b MicroAlgos) MicroAlgos {
+ return MicroAlgos{Raw: t.Sub(a.Raw, b.Raw)}
}
// ScalarMulA multiplies an Algo amount by a scalar
diff --git a/data/basics/sort.go b/data/basics/sort.go
index 0d3c75d9d..aa4986304 100644
--- a/data/basics/sort.go
+++ b/data/basics/sort.go
@@ -22,6 +22,7 @@ import (
// SortUint64 implements sorting by uint64 keys for
// canonical encoding of maps in msgpack format.
+//
//msgp:ignore SortUint64
//msgp:sort uint64 SortUint64
type SortUint64 []uint64
@@ -32,6 +33,7 @@ func (a SortUint64) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// SortAssetIndex implements sorting by AssetIndex keys for
// canonical encoding of maps in msgpack format.
+//
//msgp:ignore SortAssetIndex
//msgp:sort AssetIndex SortAssetIndex
type SortAssetIndex []AssetIndex
@@ -42,6 +44,7 @@ func (a SortAssetIndex) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// SortAppIndex implements sorting by AppIndex keys for
// canonical encoding of maps in msgpack format.
+//
//msgp:ignore SortAppIndex
//msgp:sort AppIndex SortAppIndex
type SortAppIndex []AppIndex
@@ -52,6 +55,7 @@ func (a SortAppIndex) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// SortString implements sorting by string keys for
// canonical encoding of maps in msgpack format.
+//
//msgp:ignore SortString
//msgp:sort string SortString
type SortString []string
@@ -62,6 +66,7 @@ func (a SortString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// SortAddress implements sorting by Address keys for
// canonical encoding of maps in msgpack format.
+//
//msgp:ignore SortAddress
//msgp:sort Address SortAddress
type SortAddress []Address
diff --git a/data/basics/stateProofParticipant.go b/data/basics/stateProofParticipant.go
index a2c4fc490..63f7849f8 100644
--- a/data/basics/stateProofParticipant.go
+++ b/data/basics/stateProofParticipant.go
@@ -73,6 +73,7 @@ func (p Participant) ToBeHashed() (protocol.HashID, []byte) {
// ParticipantsArray implements merklearray.Array and is used to commit
// to a Merkle tree of online accounts.
+//
//msgp:ignore ParticipantsArray
type ParticipantsArray []Participant
diff --git a/data/basics/teal.go b/data/basics/teal.go
index 9f247f2f3..412de5902 100644
--- a/data/basics/teal.go
+++ b/data/basics/teal.go
@@ -21,6 +21,7 @@ import (
"fmt"
"github.com/algorand/go-algorand/config"
+ "golang.org/x/exp/maps"
)
// DeltaAction is an enum of actions that may be performed when applying a
@@ -43,7 +44,7 @@ type ValueDelta struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Action DeltaAction `codec:"at"`
- Bytes string `codec:"bs"`
+ Bytes string `codec:"bs,allocbound=config.MaxAppBytesValueLen"`
Uint uint64 `codec:"ui"`
}
@@ -69,31 +70,15 @@ func (vd *ValueDelta) ToTealValue() (value TealValue, ok bool) {
// StateDelta is a map from key/value store keys to ValueDeltas, indicating
// what should happen for that key
-//msgp:allocbound StateDelta config.MaxStateDeltaKeys
+//
+//msgp:allocbound StateDelta config.MaxStateDeltaKeys,config.MaxAppBytesKeyLen
type StateDelta map[string]ValueDelta
// Equal checks whether two StateDeltas are equal. We don't check for nilness
// equality because an empty map will encode/decode as nil. So if our generated
// map is empty but not nil, we want to equal a decoded nil off the wire.
func (sd StateDelta) Equal(o StateDelta) bool {
- // Lengths should be the same
- if len(sd) != len(o) {
- return false
- }
- // All keys and deltas should be the same
- for k, v := range sd {
- // Other StateDelta must contain key
- ov, ok := o[k]
- if !ok {
- return false
- }
-
- // Other StateDelta must have same value for key
- if ov != v {
- return false
- }
- }
- return true
+ return maps.Equal(sd, o)
}
// Valid checks whether the keys and values in a StateDelta conform to the
@@ -178,10 +163,10 @@ func (sm StateSchema) MinBalance(proto *config.ConsensusParams) (res MicroAlgos)
type TealType uint64
const (
- // TealBytesType represents the type of a byte slice in a TEAL program
+ // TealBytesType represents the type of byte slice in a TEAL program
TealBytesType TealType = 1
- // TealUintType represents the type of a uint in a TEAL program
+ // TealUintType represents the type of uint in a TEAL program
TealUintType TealType = 2
)
@@ -226,20 +211,14 @@ func (tv *TealValue) String() string {
// TealKeyValue represents a key/value store for use in an application's
// LocalState or GlobalState
-//msgp:allocbound TealKeyValue EncodedMaxKeyValueEntries
+//
+//msgp:allocbound TealKeyValue EncodedMaxKeyValueEntries,config.MaxAppBytesKeyLen
type TealKeyValue map[string]TealValue
// Clone returns a copy of a TealKeyValue that may be modified without
// affecting the original
func (tk TealKeyValue) Clone() TealKeyValue {
- if tk == nil {
- return nil
- }
- res := make(TealKeyValue, len(tk))
- for k, v := range tk {
- res[k] = v
- }
- return res
+ return maps.Clone(tk)
}
// ToStateSchema calculates the number of each value type in a TealKeyValue and
diff --git a/data/basics/units.go b/data/basics/units.go
index 8370ceda6..fbbb55eb9 100644
--- a/data/basics/units.go
+++ b/data/basics/units.go
@@ -108,6 +108,12 @@ func (a MicroAlgos) MsgIsZero() bool {
return a.Raw == 0
}
+// MicroAlgosMaxSize returns maximum possible msgp encoded size of MicroAlgos in bytes.
+// It is expected by msgp generated MaxSize functions
+func MicroAlgosMaxSize() (s int) {
+ return msgp.Uint64Size
+}
+
// Round represents a protocol round index
type Round uint64
diff --git a/data/basics/units_test.go b/data/basics/units_test.go
index 2359bc1c6..c674270be 100644
--- a/data/basics/units_test.go
+++ b/data/basics/units_test.go
@@ -27,6 +27,7 @@ import (
func TestSubSaturate(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
a := Round(1)
b := Round(2)
@@ -37,23 +38,126 @@ func TestSubSaturate(t *testing.T) {
func TestSubSaturate32(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
- require.Equal(t, uint32(0), SubSaturate32(0, 1))
- require.Equal(t, uint32(0), SubSaturate32(1, 2))
- require.Equal(t, uint32(0), SubSaturate32(1, 1))
- require.Equal(t, uint32(0), SubSaturate32(1, math.MaxUint32))
- require.Equal(t, uint32(1), SubSaturate32(2, 1))
- require.Equal(t, uint32(math.MaxUint32-1), SubSaturate32(math.MaxUint32, 1))
+ require.Equal(t, uint32(0), SubSaturate(uint32(0), uint32(1)))
+ require.Equal(t, uint32(0), SubSaturate(uint32(1), uint32(2)))
+ require.Equal(t, uint32(0), SubSaturate(uint32(1), uint32(1)))
+ require.Equal(t, uint32(0), SubSaturate(uint32(1), math.MaxUint32))
+ require.Equal(t, uint32(1), SubSaturate(uint32(2), uint32(1)))
+ require.Equal(t, uint32(math.MaxUint32-1), SubSaturate(math.MaxUint32, uint32(1)))
}
func TestAddSaturate32(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ require.Equal(t, uint32(1), AddSaturate(uint32(0), uint32(1)))
+ require.Equal(t, uint32(math.MaxUint32-1), AddSaturate(math.MaxUint32-2, uint32(1)))
+ require.Equal(t, uint32(math.MaxUint32), AddSaturate(math.MaxUint32, uint32(0)))
+ require.Equal(t, uint32(math.MaxUint32), AddSaturate(math.MaxUint32-1, uint32(1)))
+ require.Equal(t, uint32(math.MaxUint32), AddSaturate(math.MaxUint32, uint32(2)))
+}
+
+func BenchmarkAddSaturateGenerics(b *testing.B) {
+ startVar := uint64(0xdeadbeef)
+ for n := uint64(0); n < uint64(b.N); n++ {
+ temp := AddSaturate(n, startVar)
+ startVar = temp
+ }
+}
+
+// oldOAdd adds 2 values with overflow detection
+func oldOAdd(a uint64, b uint64) (res uint64, overflowed bool) {
+ res = a + b
+ overflowed = res < a
+ return
+}
+
+// addSaturateU64Old adds 2 values with saturation on overflow (OLD IMPLEMENTATION)
+func addSaturateU64Old(a uint64, b uint64) uint64 {
+ res, overflowed := oldOAdd(a, b)
+ if overflowed {
+ return math.MaxUint64
+ }
+ return res
+}
+
+func BenchmarkAddSaturateU64Old(b *testing.B) {
+ startVar := uint64(0xdeadbeef)
+ for n := uint64(0); n < uint64(b.N); n++ {
+ temp := addSaturateU64Old(n, startVar)
+ startVar = temp
+ }
+}
+
+func BenchmarkSubSaturateGenerics(b *testing.B) {
+ startVar := uint64(0xdeadbeef)
+ for n := uint64(0); n < uint64(b.N); n++ {
+ temp := SubSaturate(n, startVar)
+ startVar = temp
+ }
+}
- require.Equal(t, uint32(1), AddSaturate32(0, 1))
- require.Equal(t, uint32(math.MaxUint32-1), AddSaturate32(math.MaxUint32-2, 1))
- require.Equal(t, uint32(math.MaxUint32), AddSaturate32(math.MaxUint32, 0))
- require.Equal(t, uint32(math.MaxUint32), AddSaturate32(math.MaxUint32-1, 1))
- require.Equal(t, uint32(math.MaxUint32), AddSaturate32(math.MaxUint32, 2))
+// oldOSub subtracts b from a with overflow detection
+func oldOSub(a uint64, b uint64) (res uint64, overflowed bool) {
+ res = a - b
+ overflowed = res > a
+ return
+}
+
+// subSaturateU64Old subtracts 2 values with saturation on underflow (OLD IMPLEMENTATION)
+func subSaturateU64Old(a uint64, b uint64) uint64 {
+ res, overflowed := oldOSub(a, b)
+ if overflowed {
+ return 0
+ }
+ return res
+}
+
+func BenchmarkSubSaturateU64Old(b *testing.B) {
+ startVar := uint64(0xdeadbeef)
+ for n := uint64(0); n < uint64(b.N); n++ {
+ temp := subSaturateU64Old(n, startVar)
+ startVar = temp
+ }
+}
+
+func BenchmarkMulSaturateGenerics(b *testing.B) {
+ startVar := uint64(0xdeadbeef)
+ for n := uint64(1); n <= uint64(b.N); n++ {
+ temp := MulSaturate(n, startVar)
+ startVar = temp
+ }
+}
+
+// oldOMul multiplies 2 values with overflow detection
+func oldOMul(a uint64, b uint64) (res uint64, overflowed bool) {
+ if b == 0 {
+ return 0, false
+ }
+ c := a * b
+ if c/b != a {
+ return 0, true
+ }
+ return c, false
+}
+
+// mulSaturateU64Old multiplies 2 values with saturation on overflow (OLD IMPLEMENTATION)
+func mulSaturateU64Old(a uint64, b uint64) uint64 {
+ res, overflowed := oldOMul(a, b)
+ if overflowed {
+ return math.MaxUint64
+ }
+ return res
+}
+
+func BenchmarkMulSaturateU64Old(b *testing.B) {
+ startVar := uint64(0xdeadbeef)
+ for n := uint64(1); n <= uint64(b.N); n++ {
+ temp := mulSaturateU64Old(n, startVar)
+ startVar = temp
+ }
}
func TestRoundUpToMultipleOf(t *testing.T) {
diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go
index f10c32635..e87517f28 100644
--- a/data/basics/userBalance.go
+++ b/data/basics/userBalance.go
@@ -26,6 +26,7 @@ import (
"github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
+ "golang.org/x/exp/slices"
)
// Status is the delegation status of an account's MicroAlgos
@@ -105,6 +106,7 @@ type VotingData struct {
}
// OnlineAccountData contains the voting information for a single account.
+//
//msgp:ignore OnlineAccountData
type OnlineAccountData struct {
MicroAlgosWithRewards MicroAlgos
@@ -267,10 +269,8 @@ type StateSchemas struct {
// affecting the original
func (ap *AppParams) Clone() (res AppParams) {
res = *ap
- res.ApprovalProgram = make([]byte, len(ap.ApprovalProgram))
- copy(res.ApprovalProgram, ap.ApprovalProgram)
- res.ClearStateProgram = make([]byte, len(ap.ClearStateProgram))
- copy(res.ClearStateProgram, ap.ClearStateProgram)
+ res.ApprovalProgram = slices.Clone(ap.ApprovalProgram)
+ res.ClearStateProgram = slices.Clone(ap.ClearStateProgram)
res.GlobalState = ap.GlobalState.Clone()
return
}
@@ -372,14 +372,14 @@ type AssetParams struct {
// UnitName specifies a hint for the name of a unit of
// this asset.
- UnitName string `codec:"un"`
+ UnitName string `codec:"un,allocbound=config.MaxAssetUnitNameBytes"`
// AssetName specifies a hint for the name of the asset.
- AssetName string `codec:"an"`
+ AssetName string `codec:"an,allocbound=config.MaxAssetNameBytes"`
// URL specifies a URL where more information about the asset can be
// retrieved
- URL string `codec:"au"`
+ URL string `codec:"au,allocbound=config.MaxAssetURLBytes"`
// MetadataHash specifies a commitment to some unspecified asset
// metadata. The format of this metadata is up to the application.
@@ -572,7 +572,7 @@ func (u AccountData) IsZero() bool {
return reflect.DeepEqual(u, AccountData{})
}
-// NormalizedOnlineBalance returns a ``normalized'' balance for this account.
+// NormalizedOnlineBalance returns a “normalized” balance for this account.
//
// The normalization compensates for rewards that have not yet been applied,
// by computing a balance normalized to round 0. To normalize, we estimate
@@ -594,7 +594,7 @@ func (u AccountData) NormalizedOnlineBalance(proto config.ConsensusParams) uint6
return NormalizedOnlineAccountBalance(u.Status, u.RewardsBase, u.MicroAlgos, proto)
}
-// NormalizedOnlineAccountBalance returns a ``normalized'' balance for an account
+// NormalizedOnlineAccountBalance returns a “normalized” balance for an account
// with the given parameters.
//
// The normalization compensates for rewards that have not yet been applied,
diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go
index 344148afa..39842aa75 100644
--- a/data/bookkeeping/block.go
+++ b/data/bookkeeping/block.go
@@ -53,7 +53,7 @@ type (
TimeStamp int64 `codec:"ts"`
// Genesis ID to which this block belongs.
- GenesisID string `codec:"gen"`
+ GenesisID string `codec:"gen,allocbound=config.MaxGenesisIDLen"`
// Genesis hash to which this block belongs.
GenesisHash crypto.Digest `codec:"gh"`
@@ -109,14 +109,9 @@ type (
UpgradeState
UpgradeVote
- // TxnCounter counts the number of transactions committed in the
- // ledger, from the time at which support for this feature was
- // introduced.
- //
- // Specifically, TxnCounter is the number of the next transaction
- // that will be committed after this block. It is 0 when no
- // transactions have ever been committed (since TxnCounter
- // started being supported).
+ // TxnCounter is the number of the next transaction that will be
+ // committed after this block. Genesis blocks can start at either
+ // 0 or 1000, depending on a consensus parameter (AppForbidLowResources).
TxnCounter uint64 `codec:"tc"`
// StateProofTracking tracks the status of the state proofs, potentially
@@ -239,7 +234,7 @@ type (
// A Block contains the Payset and metadata corresponding to a given Round.
Block struct {
BlockHeader
- Payset transactions.Payset `codec:"txns"`
+ Payset transactions.Payset `codec:"txns,maxtotalbytes=config.MaxTxnBytesPerBlock"`
}
)
diff --git a/data/bookkeeping/genesis.go b/data/bookkeeping/genesis.go
index d750b0eaf..a6e8eb6e5 100644
--- a/data/bookkeeping/genesis.go
+++ b/data/bookkeeping/genesis.go
@@ -23,6 +23,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/committee"
"github.com/algorand/go-algorand/data/transactions"
@@ -120,7 +121,7 @@ func (genesis Genesis) Balances() (GenesisBalances, error) {
return GenesisBalances{}, fmt.Errorf("repeated allocation to %s", entry.Address)
}
- genalloc[addr] = entry.State
+ genalloc[addr] = entry.State.AccountData()
}
feeSink, err := basics.UnmarshalChecksumAddress(genesis.FeeSink)
@@ -159,7 +160,7 @@ type GenesisAllocation struct {
Address string `codec:"addr"`
Comment string `codec:"comment"`
- State basics.AccountData `codec:"state"`
+ State GenesisAccountData `codec:"state"`
}
// ToBeHashed impements the crypto.Hashable interface.
@@ -175,6 +176,34 @@ type GenesisBalances struct {
Timestamp int64
}
+// GenesisAccountData contains a subset of account information that is present in the genesis file.
+type GenesisAccountData struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Status basics.Status `codec:"onl"`
+ MicroAlgos basics.MicroAlgos `codec:"algo"`
+ VoteID crypto.OneTimeSignatureVerifier `codec:"vote"`
+ StateProofID merklesignature.Commitment `codec:"stprf"`
+ SelectionID crypto.VRFVerifier `codec:"sel"`
+ VoteFirstValid basics.Round `codec:"voteFst"`
+ VoteLastValid basics.Round `codec:"voteLst"`
+ VoteKeyDilution uint64 `codec:"voteKD"`
+}
+
+// AccountData returns a basics.AccountData type for this genesis account.
+func (ga *GenesisAccountData) AccountData() basics.AccountData {
+ return basics.AccountData{
+ Status: ga.Status,
+ MicroAlgos: ga.MicroAlgos,
+ VoteID: ga.VoteID,
+ StateProofID: ga.StateProofID,
+ SelectionID: ga.SelectionID,
+ VoteFirstValid: ga.VoteFirstValid,
+ VoteLastValid: ga.VoteLastValid,
+ VoteKeyDilution: ga.VoteKeyDilution,
+ }
+}
+
// MakeGenesisBalances returns the information needed to bootstrap the ledger based on the current time
func MakeGenesisBalances(balances map[basics.Address]basics.AccountData, feeSink, rewardsPool basics.Address) GenesisBalances {
return MakeTimestampedGenesisBalances(balances, feeSink, rewardsPool, time.Now().Unix())
diff --git a/data/bookkeeping/genesis_test.go b/data/bookkeeping/genesis_test.go
index 5810e079d..72e94947f 100644
--- a/data/bookkeeping/genesis_test.go
+++ b/data/bookkeeping/genesis_test.go
@@ -50,7 +50,7 @@ func TestGenesis_Balances(t *testing.T) {
_struct: struct{}{},
Address: addr,
Comment: "",
- State: basics.AccountData{
+ State: GenesisAccountData{
MicroAlgos: basics.MicroAlgos{Raw: algos},
},
}
@@ -79,7 +79,7 @@ func TestGenesis_Balances(t *testing.T) {
},
want: GenesisBalances{
Balances: map[basics.Address]basics.AccountData{
- mustAddr(allocation1.Address): allocation1.State,
+ mustAddr(allocation1.Address): allocation1.State.AccountData(),
},
FeeSink: goodAddr,
RewardsPool: goodAddr,
@@ -96,8 +96,8 @@ func TestGenesis_Balances(t *testing.T) {
},
want: GenesisBalances{
Balances: map[basics.Address]basics.AccountData{
- mustAddr(allocation1.Address): allocation1.State,
- mustAddr(allocation2.Address): allocation2.State,
+ mustAddr(allocation1.Address): allocation1.State.AccountData(),
+ mustAddr(allocation2.Address): allocation2.State.AccountData(),
},
FeeSink: goodAddr,
RewardsPool: goodAddr,
diff --git a/data/bookkeeping/msgp_gen.go b/data/bookkeeping/msgp_gen.go
index de90680aa..6ccafb53a 100644
--- a/data/bookkeeping/msgp_gen.go
+++ b/data/bookkeeping/msgp_gen.go
@@ -9,7 +9,9 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/committee"
"github.com/algorand/go-algorand/protocol"
)
@@ -21,6 +23,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> BlockMaxSize()
//
// BlockHash
// |-----> (*) MarshalMsg
@@ -37,6 +40,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> BlockHeaderMaxSize()
//
// Genesis
// |-----> (*) MarshalMsg
@@ -45,6 +49,16 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> GenesisMaxSize()
+//
+// GenesisAccountData
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+// |-----> GenesisAccountDataMaxSize()
//
// GenesisAllocation
// |-----> (*) MarshalMsg
@@ -53,6 +67,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> GenesisAllocationMaxSize()
//
// LightBlockHeader
// |-----> (*) MarshalMsg
@@ -61,6 +76,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> LightBlockHeaderMaxSize()
//
// ParticipationUpdates
// |-----> (*) MarshalMsg
@@ -69,6 +85,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ParticipationUpdatesMaxSize()
//
// RewardsState
// |-----> (*) MarshalMsg
@@ -77,6 +94,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> RewardsStateMaxSize()
//
// StateProofTrackingData
// |-----> (*) MarshalMsg
@@ -85,6 +103,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> StateProofTrackingDataMaxSize()
//
// TxnCommitments
// |-----> (*) MarshalMsg
@@ -93,6 +112,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> TxnCommitmentsMaxSize()
//
// UpgradeVote
// |-----> (*) MarshalMsg
@@ -101,6 +121,10 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> UpgradeVoteMaxSize()
+//
+// crypto.Digest
+// |-----> crypto.DigestMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -440,6 +464,16 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "GenesisID")
+ return
+ }
+ if zb0006 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
@@ -576,27 +610,27 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0006 > protocol.NumStateProofTypes {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumStateProofTypes))
+ if zb0007 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(protocol.NumStateProofTypes))
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0007 {
+ if zb0008 {
(*z).BlockHeader.StateProofTracking = nil
} else if (*z).BlockHeader.StateProofTracking == nil {
- (*z).BlockHeader.StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0006)
+ (*z).BlockHeader.StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0007)
}
- for zb0006 > 0 {
+ for zb0007 > 0 {
var zb0001 protocol.StateProofType
var zb0002 StateProofTrackingData
- zb0006--
+ zb0007--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
@@ -612,24 +646,24 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
return
}
- if zb0008 > config.MaxProposedExpiredOnlineAccounts {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ if zb0009 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxProposedExpiredOnlineAccounts))
err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
return
}
- if zb0009 {
+ if zb0010 {
(*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
- } else if (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
- (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else if (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0009 {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0009]
} else {
- (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0009)
}
for zb0003 := range (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
bts, err = (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
@@ -707,6 +741,16 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "gen":
+ var zb0011 int
+ zb0011, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "GenesisID")
+ return
+ }
+ if zb0011 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).BlockHeader.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "GenesisID")
@@ -809,27 +853,27 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "spt":
- var zb0010 int
- var zb0011 bool
- zb0010, zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0012 int
+ var zb0013 bool
+ zb0012, zb0013, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0010 > protocol.NumStateProofTypes {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(protocol.NumStateProofTypes))
+ if zb0012 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(protocol.NumStateProofTypes))
err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0011 {
+ if zb0013 {
(*z).BlockHeader.StateProofTracking = nil
} else if (*z).BlockHeader.StateProofTracking == nil {
- (*z).BlockHeader.StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0010)
+ (*z).BlockHeader.StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0012)
}
- for zb0010 > 0 {
+ for zb0012 > 0 {
var zb0001 protocol.StateProofType
var zb0002 StateProofTrackingData
- zb0010--
+ zb0012--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "StateProofTracking")
@@ -843,24 +887,24 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).BlockHeader.StateProofTracking[zb0001] = zb0002
}
case "partupdrmv":
- var zb0012 int
- var zb0013 bool
- zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0014 int
+ var zb0015 bool
+ zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ExpiredParticipationAccounts")
return
}
- if zb0012 > config.MaxProposedExpiredOnlineAccounts {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxProposedExpiredOnlineAccounts))
+ if zb0014 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0014), uint64(config.MaxProposedExpiredOnlineAccounts))
err = msgp.WrapError(err, "ExpiredParticipationAccounts")
return
}
- if zb0013 {
+ if zb0015 {
(*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = nil
- } else if (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0012 {
- (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0012]
+ } else if (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) >= zb0014 {
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = ((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts)[:zb0014]
} else {
- (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0012)
+ (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0014)
}
for zb0003 := range (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts {
bts, err = (*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
@@ -916,6 +960,23 @@ func (z *Block) MsgIsZero() bool {
return ((*z).BlockHeader.Round.MsgIsZero()) && ((*z).BlockHeader.Branch.MsgIsZero()) && ((*z).BlockHeader.Seed.MsgIsZero()) && ((*z).BlockHeader.TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).BlockHeader.TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).BlockHeader.TimeStamp == 0) && ((*z).BlockHeader.GenesisID == "") && ((*z).BlockHeader.GenesisHash.MsgIsZero()) && ((*z).BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).BlockHeader.RewardsState.RewardsRate == 0) && ((*z).BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).BlockHeader.TxnCounter == 0) && (len((*z).BlockHeader.StateProofTracking) == 0) && (len((*z).BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts) == 0) && ((*z).Payset.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func BlockMaxSize() (s int) {
+ s = 3 + 4 + basics.RoundMaxSize() + 5 + BlockHashMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + msgp.Uint64Size + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.BlockHeader.StateProofTracking
+ s += protocol.NumStateProofTypes * (protocol.StateProofTypeMaxSize())
+ // Adding size of map values for z.BlockHeader.StateProofTracking
+ s += protocol.NumStateProofTypes * (StateProofTrackingDataMaxSize())
+ s += 11
+ // Calculating size of slice: z.BlockHeader.ParticipationUpdates.ExpiredParticipationAccounts
+ s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize()))
+ s += 5
+ // Using maxtotalbytes for: z.Payset
+ s += config.MaxTxnBytesPerBlock
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *BlockHash) MarshalMsg(b []byte) []byte {
return ((*(crypto.Digest))(z)).MarshalMsg(b)
@@ -944,6 +1005,11 @@ func (z *BlockHash) MsgIsZero() bool {
return ((*(crypto.Digest))(z)).MsgIsZero()
}
+// MaxSize returns a maximum valid message size for this message type
+func BlockHashMaxSize() int {
+ return crypto.DigestMaxSize()
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *BlockHeader) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1272,6 +1338,16 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "GenesisID")
+ return
+ }
+ if zb0006 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
@@ -1408,27 +1484,27 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0006 > protocol.NumStateProofTypes {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(protocol.NumStateProofTypes))
+ if zb0007 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(protocol.NumStateProofTypes))
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
return
}
- if zb0007 {
+ if zb0008 {
(*z).StateProofTracking = nil
} else if (*z).StateProofTracking == nil {
- (*z).StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0006)
+ (*z).StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0007)
}
- for zb0006 > 0 {
+ for zb0007 > 0 {
var zb0001 protocol.StateProofType
var zb0002 StateProofTrackingData
- zb0006--
+ zb0007--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "StateProofTracking")
@@ -1444,24 +1520,24 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0004 > 0 {
zb0004--
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
return
}
- if zb0008 > config.MaxProposedExpiredOnlineAccounts {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxProposedExpiredOnlineAccounts))
+ if zb0009 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxProposedExpiredOnlineAccounts))
err = msgp.WrapError(err, "struct-from-array", "ExpiredParticipationAccounts")
return
}
- if zb0009 {
+ if zb0010 {
(*z).ParticipationUpdates.ExpiredParticipationAccounts = nil
- } else if (*z).ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).ParticipationUpdates.ExpiredParticipationAccounts) >= zb0008 {
- (*z).ParticipationUpdates.ExpiredParticipationAccounts = ((*z).ParticipationUpdates.ExpiredParticipationAccounts)[:zb0008]
+ } else if (*z).ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).ParticipationUpdates.ExpiredParticipationAccounts) >= zb0009 {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = ((*z).ParticipationUpdates.ExpiredParticipationAccounts)[:zb0009]
} else {
- (*z).ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0008)
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0009)
}
for zb0003 := range (*z).ParticipationUpdates.ExpiredParticipationAccounts {
bts, err = (*z).ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
@@ -1531,6 +1607,16 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "gen":
+ var zb0011 int
+ zb0011, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "GenesisID")
+ return
+ }
+ if zb0011 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "GenesisID")
@@ -1633,27 +1719,27 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "spt":
- var zb0010 int
- var zb0011 bool
- zb0010, zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0012 int
+ var zb0013 bool
+ zb0012, zb0013, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0010 > protocol.NumStateProofTypes {
- err = msgp.ErrOverflow(uint64(zb0010), uint64(protocol.NumStateProofTypes))
+ if zb0012 > protocol.NumStateProofTypes {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(protocol.NumStateProofTypes))
err = msgp.WrapError(err, "StateProofTracking")
return
}
- if zb0011 {
+ if zb0013 {
(*z).StateProofTracking = nil
} else if (*z).StateProofTracking == nil {
- (*z).StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0010)
+ (*z).StateProofTracking = make(map[protocol.StateProofType]StateProofTrackingData, zb0012)
}
- for zb0010 > 0 {
+ for zb0012 > 0 {
var zb0001 protocol.StateProofType
var zb0002 StateProofTrackingData
- zb0010--
+ zb0012--
bts, err = zb0001.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "StateProofTracking")
@@ -1667,24 +1753,24 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).StateProofTracking[zb0001] = zb0002
}
case "partupdrmv":
- var zb0012 int
- var zb0013 bool
- zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0014 int
+ var zb0015 bool
+ zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ExpiredParticipationAccounts")
return
}
- if zb0012 > config.MaxProposedExpiredOnlineAccounts {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxProposedExpiredOnlineAccounts))
+ if zb0014 > config.MaxProposedExpiredOnlineAccounts {
+ err = msgp.ErrOverflow(uint64(zb0014), uint64(config.MaxProposedExpiredOnlineAccounts))
err = msgp.WrapError(err, "ExpiredParticipationAccounts")
return
}
- if zb0013 {
+ if zb0015 {
(*z).ParticipationUpdates.ExpiredParticipationAccounts = nil
- } else if (*z).ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).ParticipationUpdates.ExpiredParticipationAccounts) >= zb0012 {
- (*z).ParticipationUpdates.ExpiredParticipationAccounts = ((*z).ParticipationUpdates.ExpiredParticipationAccounts)[:zb0012]
+ } else if (*z).ParticipationUpdates.ExpiredParticipationAccounts != nil && cap((*z).ParticipationUpdates.ExpiredParticipationAccounts) >= zb0014 {
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = ((*z).ParticipationUpdates.ExpiredParticipationAccounts)[:zb0014]
} else {
- (*z).ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0012)
+ (*z).ParticipationUpdates.ExpiredParticipationAccounts = make([]basics.Address, zb0014)
}
for zb0003 := range (*z).ParticipationUpdates.ExpiredParticipationAccounts {
bts, err = (*z).ParticipationUpdates.ExpiredParticipationAccounts[zb0003].UnmarshalMsg(bts)
@@ -1733,6 +1819,20 @@ func (z *BlockHeader) MsgIsZero() bool {
return ((*z).Round.MsgIsZero()) && ((*z).Branch.MsgIsZero()) && ((*z).Seed.MsgIsZero()) && ((*z).TxnCommitments.NativeSha512_256Commitment.MsgIsZero()) && ((*z).TxnCommitments.Sha256Commitment.MsgIsZero()) && ((*z).TimeStamp == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).RewardsState.FeeSink.MsgIsZero()) && ((*z).RewardsState.RewardsPool.MsgIsZero()) && ((*z).RewardsState.RewardsLevel == 0) && ((*z).RewardsState.RewardsRate == 0) && ((*z).RewardsState.RewardsResidue == 0) && ((*z).RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocolApprovals == 0) && ((*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).UpgradeVote.UpgradeApprove == false) && ((*z).TxnCounter == 0) && (len((*z).StateProofTracking) == 0) && (len((*z).ParticipationUpdates.ExpiredParticipationAccounts) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func BlockHeaderMaxSize() (s int) {
+ s = 3 + 4 + basics.RoundMaxSize() + 5 + BlockHashMaxSize() + 5 + committee.SeedMaxSize() + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 10 + protocol.ConsensusVersionMaxSize() + 8 + msgp.Uint64Size + 11 + basics.RoundMaxSize() + 11 + basics.RoundMaxSize() + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.StateProofTracking
+ s += protocol.NumStateProofTypes * (protocol.StateProofTypeMaxSize())
+ // Adding size of map values for z.StateProofTracking
+ s += protocol.NumStateProofTypes * (StateProofTrackingDataMaxSize())
+ s += 11
+ // Calculating size of slice: z.ParticipationUpdates.ExpiredParticipationAccounts
+ s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Genesis) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2076,6 +2176,296 @@ func (z *Genesis) MsgIsZero() bool {
return ((*z).SchemaID == "") && ((*z).Network.MsgIsZero()) && ((*z).Proto.MsgIsZero()) && (len((*z).Allocation) == 0) && ((*z).RewardsPool == "") && ((*z).FeeSink == "") && ((*z).Timestamp == 0) && ((*z).Comment == "") && ((*z).DevMode == false)
}
+// MaxSize returns a maximum valid message size for this message type
+func GenesisMaxSize() (s int) {
+ s = 1 + 3
+ panic("Unable to determine max size: String type z.SchemaID is unbounded")
+ s += 8 + protocol.NetworkIDMaxSize() + 6 + protocol.ConsensusVersionMaxSize() + 6
+ // Calculating size of slice: z.Allocation
+ s += msgp.ArrayHeaderSize + ((MaxInitialGenesisAllocationSize) * (GenesisAllocationMaxSize()))
+ s += 4
+ panic("Unable to determine max size: String type z.RewardsPool is unbounded")
+ s += 5
+ panic("Unable to determine max size: String type z.FeeSink is unbounded")
+ s += 10 + msgp.Int64Size + 8
+ panic("Unable to determine max size: String type z.Comment is unbounded")
+ s += 8 + msgp.BoolSize
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *GenesisAccountData) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(8)
+ var zb0001Mask uint16 /* 9 bits */
+ if (*z).MicroAlgos.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).Status.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).SelectionID.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ if (*z).StateProofID.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x10
+ }
+ if (*z).VoteID.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x20
+ }
+ if (*z).VoteFirstValid.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x40
+ }
+ if (*z).VoteKeyDilution == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x80
+ }
+ if (*z).VoteLastValid.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x100
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "algo"
+ o = append(o, 0xa4, 0x61, 0x6c, 0x67, 0x6f)
+ o = (*z).MicroAlgos.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "onl"
+ o = append(o, 0xa3, 0x6f, 0x6e, 0x6c)
+ o = (*z).Status.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "sel"
+ o = append(o, 0xa3, 0x73, 0x65, 0x6c)
+ o = (*z).SelectionID.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x10) == 0 { // if not empty
+ // string "stprf"
+ o = append(o, 0xa5, 0x73, 0x74, 0x70, 0x72, 0x66)
+ o = (*z).StateProofID.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x20) == 0 { // if not empty
+ // string "vote"
+ o = append(o, 0xa4, 0x76, 0x6f, 0x74, 0x65)
+ o = (*z).VoteID.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x40) == 0 { // if not empty
+ // string "voteFst"
+ o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x46, 0x73, 0x74)
+ o = (*z).VoteFirstValid.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x80) == 0 { // if not empty
+ // string "voteKD"
+ o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x4b, 0x44)
+ o = msgp.AppendUint64(o, (*z).VoteKeyDilution)
+ }
+ if (zb0001Mask & 0x100) == 0 { // if not empty
+ // string "voteLst"
+ o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x4c, 0x73, 0x74)
+ o = (*z).VoteLastValid.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *GenesisAccountData) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*GenesisAccountData)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *GenesisAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Status.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Status")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).MicroAlgos.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "MicroAlgos")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VoteID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteID")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).StateProofID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StateProofID")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).SelectionID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SelectionID")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VoteFirstValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteFirstValid")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VoteLastValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteLastValid")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).VoteKeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VoteKeyDilution")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = GenesisAccountData{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "onl":
+ bts, err = (*z).Status.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Status")
+ return
+ }
+ case "algo":
+ bts, err = (*z).MicroAlgos.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MicroAlgos")
+ return
+ }
+ case "vote":
+ bts, err = (*z).VoteID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteID")
+ return
+ }
+ case "stprf":
+ bts, err = (*z).StateProofID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StateProofID")
+ return
+ }
+ case "sel":
+ bts, err = (*z).SelectionID.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SelectionID")
+ return
+ }
+ case "voteFst":
+ bts, err = (*z).VoteFirstValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteFirstValid")
+ return
+ }
+ case "voteLst":
+ bts, err = (*z).VoteLastValid.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteLastValid")
+ return
+ }
+ case "voteKD":
+ (*z).VoteKeyDilution, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VoteKeyDilution")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *GenesisAccountData) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*GenesisAccountData)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *GenesisAccountData) Msgsize() (s int) {
+ s = 1 + 4 + (*z).Status.Msgsize() + 5 + (*z).MicroAlgos.Msgsize() + 5 + (*z).VoteID.Msgsize() + 6 + (*z).StateProofID.Msgsize() + 4 + (*z).SelectionID.Msgsize() + 8 + (*z).VoteFirstValid.Msgsize() + 8 + (*z).VoteLastValid.Msgsize() + 7 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *GenesisAccountData) MsgIsZero() bool {
+ return ((*z).Status.MsgIsZero()) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).VoteID.MsgIsZero()) && ((*z).StateProofID.MsgIsZero()) && ((*z).SelectionID.MsgIsZero()) && ((*z).VoteFirstValid.MsgIsZero()) && ((*z).VoteLastValid.MsgIsZero()) && ((*z).VoteKeyDilution == 0)
+}
+
+// MaxSize returns a maximum valid message size for this message type
+func GenesisAccountDataMaxSize() (s int) {
+ s = 1 + 4 + basics.StatusMaxSize() + 5 + basics.MicroAlgosMaxSize() + 5 + crypto.OneTimeSignatureVerifierMaxSize() + 6 + merklesignature.CommitmentMaxSize() + 4 + crypto.VRFVerifierMaxSize() + 8 + basics.RoundMaxSize() + 8 + basics.RoundMaxSize() + 7 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *GenesisAllocation) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2204,6 +2594,16 @@ func (z *GenesisAllocation) MsgIsZero() bool {
return ((*z).Address == "") && ((*z).Comment == "") && ((*z).State.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func GenesisAllocationMaxSize() (s int) {
+ s = 1 + 5
+ panic("Unable to determine max size: String type z.Address is unbounded")
+ s += 8
+ panic("Unable to determine max size: String type z.Comment is unbounded")
+ s += 6 + GenesisAccountDataMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *LightBlockHeader) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2379,6 +2779,12 @@ func (z *LightBlockHeader) MsgIsZero() bool {
return ((*z).Seed.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).GenesisHash.MsgIsZero()) && ((*z).Sha256TxnCommitment.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func LightBlockHeaderMaxSize() (s int) {
+ s = 1 + 2 + committee.SeedMaxSize() + 2 + basics.RoundMaxSize() + 3 + crypto.DigestMaxSize() + 3 + crypto.GenericDigestMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *ParticipationUpdates) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2537,6 +2943,14 @@ func (z *ParticipationUpdates) MsgIsZero() bool {
return (len((*z).ExpiredParticipationAccounts) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func ParticipationUpdatesMaxSize() (s int) {
+ s = 1 + 11
+ // Calculating size of slice: z.ExpiredParticipationAccounts
+ s += msgp.ArrayHeaderSize + ((config.MaxProposedExpiredOnlineAccounts) * (basics.AddressMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *RewardsState) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2758,6 +3172,12 @@ func (z *RewardsState) MsgIsZero() bool {
return ((*z).FeeSink.MsgIsZero()) && ((*z).RewardsPool.MsgIsZero()) && ((*z).RewardsLevel == 0) && ((*z).RewardsRate == 0) && ((*z).RewardsResidue == 0) && ((*z).RewardsRecalculationRound.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func RewardsStateMaxSize() (s int) {
+ s = 1 + 5 + basics.AddressMaxSize() + 4 + basics.AddressMaxSize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + basics.RoundMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *StateProofTrackingData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2910,6 +3330,12 @@ func (z *StateProofTrackingData) MsgIsZero() bool {
return ((*z).StateProofVotersCommitment.MsgIsZero()) && ((*z).StateProofOnlineTotalWeight.MsgIsZero()) && ((*z).StateProofNextRound.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func StateProofTrackingDataMaxSize() (s int) {
+ s = 1 + 2 + crypto.GenericDigestMaxSize() + 2 + basics.MicroAlgosMaxSize() + 2 + basics.RoundMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *TxnCommitments) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3039,6 +3465,12 @@ func (z *TxnCommitments) MsgIsZero() bool {
return ((*z).NativeSha512_256Commitment.MsgIsZero()) && ((*z).Sha256Commitment.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func TxnCommitmentsMaxSize() (s int) {
+ s = 1 + 4 + crypto.DigestMaxSize() + 7 + crypto.DigestMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *UpgradeVote) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3190,3 +3622,9 @@ func (z *UpgradeVote) Msgsize() (s int) {
func (z *UpgradeVote) MsgIsZero() bool {
return ((*z).UpgradePropose.MsgIsZero()) && ((*z).UpgradeDelay.MsgIsZero()) && ((*z).UpgradeApprove == false)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func UpgradeVoteMaxSize() (s int) {
+ s = 1 + 12 + protocol.ConsensusVersionMaxSize() + 13 + basics.RoundMaxSize() + 11 + msgp.BoolSize
+ return
+}
diff --git a/data/bookkeeping/msgp_gen_test.go b/data/bookkeeping/msgp_gen_test.go
index 1f61ae825..b49bd7240 100644
--- a/data/bookkeeping/msgp_gen_test.go
+++ b/data/bookkeeping/msgp_gen_test.go
@@ -194,6 +194,66 @@ func BenchmarkUnmarshalGenesis(b *testing.B) {
}
}
+func TestMarshalUnmarshalGenesisAccountData(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := GenesisAccountData{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingGenesisAccountData(t *testing.T) {
+ protocol.RunEncodingTest(t, &GenesisAccountData{})
+}
+
+func BenchmarkMarshalMsgGenesisAccountData(b *testing.B) {
+ v := GenesisAccountData{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgGenesisAccountData(b *testing.B) {
+ v := GenesisAccountData{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalGenesisAccountData(b *testing.B) {
+ v := GenesisAccountData{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalGenesisAllocation(t *testing.T) {
partitiontest.PartitionTest(t)
v := GenesisAllocation{}
diff --git a/data/committee/committee.go b/data/committee/committee.go
index 263aac999..93703367e 100644
--- a/data/committee/committee.go
+++ b/data/committee/committee.go
@@ -39,6 +39,7 @@ type Selector interface {
// BalanceRecord pairs an account's address with its associated data.
//
// This struct is used to decouple LedgerReader.AccountData from basics.BalanceRecord.
+//
//msgp:ignore BalanceRecord
type BalanceRecord struct {
basics.OnlineAccountData
diff --git a/data/committee/credential.go b/data/committee/credential.go
index 8debf212a..6c6aac794 100644
--- a/data/committee/credential.go
+++ b/data/committee/credential.go
@@ -24,9 +24,9 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/committee/sortition"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/sortition"
)
type (
@@ -103,7 +103,7 @@ func (cred UnauthenticatedCredential) Verify(proto config.ConsensusParams, m Mem
} else if m.TotalMoney.IsZero() || expectedSelection == 0 || expectedSelection > float64(m.TotalMoney.Raw) {
logging.Base().Panicf("UnauthenticatedCredential.Verify: m.TotalMoney %v, expectedSelection %v", m.TotalMoney.Raw, expectedSelection)
} else if !userMoney.IsZero() {
- weight = sortition.Select(userMoney.Raw, m.TotalMoney.Raw, expectedSelection, h)
+ weight = sortition.Select(userMoney.Raw, m.TotalMoney.Raw, expectedSelection, sortition.Digest(h))
}
if weight == 0 {
diff --git a/data/committee/msgp_gen.go b/data/committee/msgp_gen.go
index ff9d7a95a..6427f3eb8 100644
--- a/data/committee/msgp_gen.go
+++ b/data/committee/msgp_gen.go
@@ -4,6 +4,9 @@ package committee
import (
"github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
)
// The following msgp objects are implemented in this file:
@@ -14,6 +17,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> CredentialMaxSize()
//
// Seed
// |-----> (*) MarshalMsg
@@ -22,6 +26,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SeedMaxSize()
//
// UnauthenticatedCredential
// |-----> (*) MarshalMsg
@@ -30,6 +35,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> UnauthenticatedCredentialMaxSize()
//
// hashableCredential
// |-----> (*) MarshalMsg
@@ -38,6 +44,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> HashableCredentialMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -238,6 +245,12 @@ func (z *Credential) MsgIsZero() bool {
return ((*z).Weight == 0) && ((*z).VrfOut.MsgIsZero()) && ((*z).DomainSeparationEnabled == false) && ((*z).Hashable.MsgIsZero()) && ((*z).UnauthenticatedCredential.Proof.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func CredentialMaxSize() (s int) {
+ s = 1 + 3 + msgp.Uint64Size + 2 + crypto.DigestMaxSize() + 3 + msgp.BoolSize + 3 + HashableCredentialMaxSize() + 3 + crypto.VrfProofMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Seed) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -277,6 +290,13 @@ func (z *Seed) MsgIsZero() bool {
return (*z) == (Seed{})
}
+// MaxSize returns a maximum valid message size for this message type
+func SeedMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *UnauthenticatedCredential) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -383,6 +403,12 @@ func (z *UnauthenticatedCredential) MsgIsZero() bool {
return ((*z).Proof.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func UnauthenticatedCredentialMaxSize() (s int) {
+ s = 1 + 3 + crypto.VrfProofMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *hashableCredential) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -534,3 +560,9 @@ func (z *hashableCredential) Msgsize() (s int) {
func (z *hashableCredential) MsgIsZero() bool {
return ((*z).RawOut.MsgIsZero()) && ((*z).Member.MsgIsZero()) && ((*z).Iter == 0)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func HashableCredentialMaxSize() (s int) {
+ s = 1 + 2 + crypto.VrfOutputMaxSize() + 2 + basics.AddressMaxSize() + 2 + msgp.Uint64Size
+ return
+}
diff --git a/data/committee/sortition/sortition.cpp b/data/committee/sortition/sortition.cpp
deleted file mode 100644
index d2540ff2c..000000000
--- a/data/committee/sortition/sortition.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-#include "sortition.h"
-#include <boost/math/distributions/binomial.hpp>
-
-uint64_t sortition_binomial_cdf_walk(double n, double p, double ratio, uint64_t money) {
- boost::math::binomial_distribution<double> dist(n, p);
- for (uint64_t j = 0; j < money; j++) {
- // Get the cdf
- double boundary = cdf(dist, j);
-
- // Found the correct boundary, break
- if (ratio <= boundary) {
- return j;
- }
- }
- return money;
-}
diff --git a/data/committee/sortition/sortition.go b/data/committee/sortition/sortition.go
deleted file mode 100644
index 9bc5bac02..000000000
--- a/data/committee/sortition/sortition.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (C) 2019-2023 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package sortition
-
-// #cgo CFLAGS: -O3
-// #cgo CXXFLAGS: -std=c++11 -Wno-deprecated
-// #include <stdint.h>
-// #include <stdlib.h>
-// #include "sortition.h"
-import "C"
-
-import (
- "fmt"
- "math/big"
- "strings"
-
- "github.com/algorand/go-algorand/crypto"
-)
-
-const precision = uint(8 * (crypto.DigestSize + 1))
-
-var maxFloat *big.Float
-
-// Select runs the sortition function and returns the number of time the key was selected
-func Select(money uint64, totalMoney uint64, expectedSize float64, vrfOutput crypto.Digest) uint64 {
- binomialN := float64(money)
- binomialP := expectedSize / float64(totalMoney)
-
- t := &big.Int{}
- t.SetBytes(vrfOutput[:])
-
- h := big.Float{}
- h.SetPrec(precision)
- h.SetInt(t)
-
- ratio := big.Float{}
- cratio, _ := ratio.Quo(&h, maxFloat).Float64()
-
- return uint64(C.sortition_binomial_cdf_walk(C.double(binomialN), C.double(binomialP), C.double(cratio), C.uint64_t(money)))
-}
-
-func init() {
- var b int
- var err error
- maxFloatString := fmt.Sprintf("0x%s", strings.Repeat("ff", crypto.DigestSize))
- maxFloat, b, err = big.ParseFloat(maxFloatString, 0, precision, big.ToNearestEven)
- if b != 16 || err != nil {
- err = fmt.Errorf("failed to parse big float constant in sortition : %w", err)
- panic(err)
- }
-}
diff --git a/data/committee/sortition/sortition.h b/data/committee/sortition/sortition.h
deleted file mode 100644
index e00ba0239..000000000
--- a/data/committee/sortition/sortition.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __SORTITION_H__
-#define __SORTITION_H__
-
-#include <stdint.h>
-#include <stdlib.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
- uint64_t sortition_binomial_cdf_walk(double n, double p, double ratio, uint64_t money);
-#ifdef __cplusplus
-}
-
-#endif
-
-#endif
diff --git a/data/committee/sortition/sortition_test.go b/data/committee/sortition/sortition_test.go
deleted file mode 100644
index 6d4685762..000000000
--- a/data/committee/sortition/sortition_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (C) 2019-2023 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package sortition
-
-import (
- "math/rand"
- "testing"
-
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-func BenchmarkSortition(b *testing.B) {
- b.StopTimer()
- keys := make([]crypto.Digest, b.N)
- for i := 0; i < b.N; i++ {
- rand.Read(keys[i][:])
- }
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- Select(1000000, 1000000000000, 2500, keys[i])
- }
-}
-
-func TestSortitionBasic(t *testing.T) {
- partitiontest.PartitionTest(t)
- hitcount := uint64(0)
- const N = 1000
- const expectedSize = 20
- const myMoney = 100
- const totalMoney = 200
- for i := 0; i < N; i++ {
- var vrfOutput crypto.Digest
- rand.Read(vrfOutput[:])
- selected := Select(myMoney, totalMoney, expectedSize, vrfOutput)
- hitcount += selected
- }
- expected := uint64(N * expectedSize / 2)
- var d uint64
- if expected > hitcount {
- d = expected - hitcount
- } else {
- d = hitcount - expected
- }
- // within 2% good enough
- maxd := expected / 50
- if d > maxd {
- t.Errorf("wanted %d selections but got %d, d=%d, maxd=%d", expected, hitcount, d, maxd)
- }
-}
diff --git a/data/hashable/msgp_gen.go b/data/hashable/msgp_gen.go
index bcb140564..74707f076 100644
--- a/data/hashable/msgp_gen.go
+++ b/data/hashable/msgp_gen.go
@@ -14,6 +14,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> MessageMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -121,3 +122,10 @@ func (z *Message) Msgsize() (s int) {
func (z *Message) MsgIsZero() bool {
return ((*z).Message == "")
}
+
+// MaxSize returns a maximum valid message size for this message type
+func MessageMaxSize() (s int) {
+ s = 1 + 4
+ panic("Unable to determine max size: String type z.Message is unbounded")
+ return
+}
diff --git a/data/ledger_test.go b/data/ledger_test.go
index 20a082839..cd7d6297a 100644
--- a/data/ledger_test.go
+++ b/data/ledger_test.go
@@ -19,6 +19,9 @@ package data
import (
"context"
"fmt"
+ "regexp"
+ "strconv"
+ "strings"
"sync"
"testing"
@@ -625,10 +628,52 @@ func TestLedgerErrorValidate(t *testing.T) {
for more {
select {
case err := <-errChan:
+ if strings.Contains(err.Error(), "before dbRound") {
+ // handle race eval errors like "round 1933 before dbRound 1934"
+ // see explanation in unexpectedMessages
+ re := regexp.MustCompile(`round (\d+) before dbRound (\d+)`)
+ result := re.FindStringSubmatch(err.Error())
+ require.NotNil(t, result)
+ require.Len(t, result, 3)
+ evalRound, err1 := strconv.Atoi(result[1])
+ require.NoError(t, err1)
+ dbRound, err1 := strconv.Atoi(result[2])
+ require.NoError(t, err1)
+ require.GreaterOrEqual(t, int(l.Latest()), dbRound+int(cfg.MaxAcctLookback))
+ require.Less(t, evalRound, dbRound)
+ err = nil
+ }
require.NoError(t, err)
case <-expectedMessages:
// only debug messages should be reported
case um := <-unexpectedMessages:
+ if strings.Contains(um, "before dbRound") {
+ // EnsureBlock might log the following:
+ // data.EnsureBlock: could not write block 774 to the ledger: round 773 before dbRound 774
+ // it happens because of simultaneous EnsureValidatedBlock and EnsureBlock calls
+ // that pass round check and then EnsureBlock yields after StartEvaluator.
+ // Meanwhile EnsureValidatedBlock finishes and adds the block to the ledger.
+ // After that, a trackerDB commit happens and the account data gets flushed.
+ // The EnsureBlock goroutine then tries to evaluate a first transaction and fails because
+ // the trackerDB advanced further.
+ // This is okay to ignore if
+ // - the attempted round is less than or equal to dbRound
+ // - ledger latest round is greater than dbRound + cfg.MaxAcctLookback
+ re := regexp.MustCompile(`could not write block (\d+) to the ledger: round (\d+) before dbRound (\d+)`)
+ result := re.FindStringSubmatch(um)
+ require.NotNil(t, result)
+ require.Len(t, result, 4)
+ attemptedRound, err := strconv.Atoi(result[1])
+ require.NoError(t, err)
+ evalRound, err := strconv.Atoi(result[2])
+ require.NoError(t, err)
+ dbRound, err := strconv.Atoi(result[3])
+ require.NoError(t, err)
+ require.Equal(t, attemptedRound, evalRound+1)
+ require.LessOrEqual(t, attemptedRound, dbRound)
+ require.GreaterOrEqual(t, int(l.Latest()), dbRound+int(cfg.MaxAcctLookback))
+ um = ""
+ }
require.Empty(t, um, um)
default:
more = false
diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go
index b8adcff0a..f03eadf7d 100644
--- a/data/pools/transactionPool_test.go
+++ b/data/pools/transactionPool_test.go
@@ -585,7 +585,7 @@ func TestRememberForget(t *testing.T) {
require.Len(t, pending, 0)
}
-// Test that clean up works
+// Test that clean up works
func TestCleanUp(t *testing.T) {
partitiontest.PartitionTest(t)
diff --git a/data/stateproofmsg/msgp_gen.go b/data/stateproofmsg/msgp_gen.go
index d8dfb3948..02a17491b 100644
--- a/data/stateproofmsg/msgp_gen.go
+++ b/data/stateproofmsg/msgp_gen.go
@@ -16,6 +16,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> MessageMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -255,3 +256,9 @@ func (z *Message) Msgsize() (s int) {
func (z *Message) MsgIsZero() bool {
return (len((*z).BlockHeadersCommitment) == 0) && (len((*z).VotersCommitment) == 0) && ((*z).LnProvenWeight == 0) && ((*z).FirstAttestedRound == 0) && ((*z).LastAttestedRound == 0)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func MessageMaxSize() (s int) {
+ s = 1 + 2 + msgp.BytesPrefixSize + crypto.Sha256Size + 2 + msgp.BytesPrefixSize + crypto.SumhashDigestSize + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size
+ return
+}
diff --git a/data/transactions/application.go b/data/transactions/application.go
index c303093c8..db7037700 100644
--- a/data/transactions/application.go
+++ b/data/transactions/application.go
@@ -20,6 +20,7 @@ import (
"fmt"
"github.com/algorand/go-algorand/data/basics"
+ "golang.org/x/exp/slices"
)
const (
@@ -56,6 +57,7 @@ const (
// OnCompletion is an enum representing some layer 1 side effect that an
// ApplicationCall transaction will have if it is included in a block.
+//
//go:generate stringer -type=OnCompletion -output=application_string.go
type OnCompletion uint64
@@ -106,7 +108,7 @@ type ApplicationCallTxnFields struct {
// ApplicationArgs are arguments accessible to the executing
// ApprovalProgram or ClearStateProgram.
- ApplicationArgs [][]byte `codec:"apaa,allocbound=encodedMaxApplicationArgs"`
+ ApplicationArgs [][]byte `codec:"apaa,allocbound=encodedMaxApplicationArgs,maxtotalbytes=config.MaxAppTotalArgLen"`
// Accounts are accounts whose balance records are accessible
// by the executing ApprovalProgram or ClearStateProgram. To
@@ -172,7 +174,7 @@ type BoxRef struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Index uint64 `codec:"i"`
- Name []byte `codec:"n"`
+ Name []byte `codec:"n,allocbound=config.MaxBytesKeyValueLen"`
}
// Empty indicates whether or not all the fields in the
@@ -247,10 +249,8 @@ func (ac *ApplicationCallTxnFields) IndexByAddress(target basics.Address, sender
}
// Otherwise we index into ac.Accounts
- for idx, addr := range ac.Accounts {
- if target == addr {
- return uint64(idx) + 1, nil
- }
+ if idx := slices.Index(ac.Accounts, target); idx != -1 {
+ return uint64(idx) + 1, nil
}
return 0, fmt.Errorf("invalid Account reference %s", target)
diff --git a/data/transactions/common_test.go b/data/transactions/common_test.go
index 0509241e6..9ad00816a 100644
--- a/data/transactions/common_test.go
+++ b/data/transactions/common_test.go
@@ -17,7 +17,6 @@
package transactions
import (
- "errors"
"math/rand"
"github.com/algorand/go-algorand/config"
@@ -26,64 +25,6 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
-
-// BalanceMap is a simple implementation of the balances interface.
-type BalanceMap map[basics.Address]basics.BalanceRecord
-
-func (b BalanceMap) Move(src, dst basics.Address, amount basics.MicroAlgos) error {
- var overflowed bool
- var tmp basics.MicroAlgos
- srcBal, ok := b[src]
- if !ok {
- return errors.New("Move() called with src not in tx.RelevantAddrs")
- }
- tmp, overflowed = basics.OSubA(srcBal.MicroAlgos, amount)
- if overflowed {
- return errors.New("Move(): sender overspent")
- }
- srcBal.MicroAlgos = tmp
- b[src] = srcBal
-
- dstBal, ok := b[dst]
- if !ok {
- return errors.New("Move() called with dst not in tx.RelevantAddrs")
- }
- tmp, overflowed = basics.OAddA(dstBal.MicroAlgos, amount)
- if overflowed {
- return errors.New("Move(): recipient balance overflowed")
- }
- dstBal.MicroAlgos = tmp
- b[dst] = dstBal
-
- return nil
-}
-
-func (b BalanceMap) Get(addr basics.Address) (basics.BalanceRecord, error) {
- record, ok := b[addr]
- if !ok {
- return basics.BalanceRecord{}, errors.New("Get() called on an address not in tx.RelevantAddrs")
- }
- return record, nil
-}
-
-func (b BalanceMap) Put(record basics.BalanceRecord) error {
- if _, ok := b[record.Addr]; !ok {
- return errors.New("Put() called on an account whose address was not in tx.RelevantAddrs")
- }
- b[record.Addr] = record
- return nil
-}
-
-// set up a BalanceMap for a transaction containing only the transactions RelevantAddrs.
-func makeTestBalancesForTransaction(tx Transaction) BalanceMap {
- bals := make(BalanceMap)
- for _, addr := range tx.RelevantAddrs(SpecialAddresses{RewardsPool: poolAddr}) {
- bals[addr] = basics.BalanceRecord{Addr: addr}
- }
- return bals
-}
-
func generateTestObjects(numTxs, numAccs int) ([]Transaction, []SignedTxn, []*crypto.SignatureSecrets, []basics.Address) {
txs := make([]Transaction, numTxs)
signed := make([]SignedTxn, numTxs)
diff --git a/data/transactions/error.go b/data/transactions/error.go
index 4ce693cf9..c81af7a77 100644
--- a/data/transactions/error.go
+++ b/data/transactions/error.go
@@ -23,6 +23,7 @@ import (
)
// MinFeeError defines an error type which could be returned from the method WellFormed
+//
//msgp:ignore MinFeeError
type MinFeeError string
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 86719cb6b..20ba9563a 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -596,8 +596,8 @@ func asmPushInt(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sour
func asmPushInts(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
ops.pending.WriteByte(spec.Opcode)
- _, err := asmIntImmArgs(ops, args)
- return err
+ asmIntImmArgs(ops, args)
+ return nil
}
func asmPushBytes(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
@@ -672,6 +672,9 @@ func parseBinaryArgs(args []token) ([]byte, int, error) {
}
val, err := base64.StdEncoding.DecodeString(arg[open+1 : close])
if err != nil {
+ if cie, ok := err.(base64.CorruptInputError); ok {
+ return nil, 0, base64.CorruptInputError(int64(cie) + int64(open) + 1)
+ }
return nil, 0, err
}
return val, 1, nil
@@ -727,7 +730,7 @@ func parseStringLiteral(input string) (result []byte, err error) {
char := input[pos]
if char == '\\' && !escapeSeq {
if hexSeq {
- return nil, fmt.Errorf("escape seq inside hex number")
+ return nil, fmt.Errorf("escape sequence inside hex number")
}
escapeSeq = true
pos++
@@ -757,7 +760,7 @@ func parseStringLiteral(input string) (result []byte, err error) {
if hexSeq {
hexSeq = false
if pos >= len(input)-2 { // count a closing quote
- return nil, fmt.Errorf("non-terminated hex seq")
+ return nil, fmt.Errorf("non-terminated hex sequence")
}
num, err := strconv.ParseUint(input[pos:pos+2], 16, 8)
if err != nil {
@@ -771,7 +774,7 @@ func parseStringLiteral(input string) (result []byte, err error) {
pos++
}
if escapeSeq || hexSeq {
- return nil, fmt.Errorf("non-terminated escape seq")
+ return nil, fmt.Errorf("non-terminated escape sequence")
}
return
@@ -851,7 +854,7 @@ func asmMethod(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourc
return args[0].errorf("unable to parse method signature")
}
-func asmIntImmArgs(ops *OpStream, args []token) ([]uint64, *sourceError) {
+func asmIntImmArgs(ops *OpStream, args []token) []uint64 {
ivals := make([]uint64, len(args))
var scratch [binary.MaxVarintLen64]byte
l := binary.PutUvarint(scratch[:], uint64(len(args)))
@@ -866,15 +869,12 @@ func asmIntImmArgs(ops *OpStream, args []token) ([]uint64, *sourceError) {
ivals[i] = cu
}
- return ivals, nil
+ return ivals
}
func asmIntCBlock(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
ops.pending.WriteByte(spec.Opcode)
- ivals, err := asmIntImmArgs(ops, args)
- if err != nil {
- return err
- }
+ ivals := asmIntImmArgs(ops, args)
if !ops.known.deadcode {
// If we previously processed an `int`, we thought we could insert our
// own intcblock, but now we see a manual one.
@@ -895,10 +895,9 @@ func asmByteImmArgs(ops *OpStream, spec *OpSpec, args []token) ([][]byte, *sourc
for len(rest) > 0 {
val, consumed, err := parseBinaryArgs(rest)
if err != nil {
- // Would be nice to keep going, as in
- // intcblock, but parseBinaryArgs would have
- // to return a useful consumed value even in
- // the face of errors. Hard.
+ // Would be nice to keep going, as in asmIntImmArgs, but
+ // parseBinaryArgs would have to return a useful consumed value even
+ // in the face of errors. Hard.
return nil, rest[0].errorf("%s %w", spec.Name, err)
}
bvals = append(bvals, val)
@@ -1271,11 +1270,6 @@ func typeBury(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, erro
}
top := len(pgm.stack) - 1
- typ, ok := pgm.top()
- if !ok {
- return nil, nil, nil // Will error because bury demands a stack arg
- }
-
idx := top - n
if idx < 0 {
if pgm.bottom.AVMType == avmNone {
@@ -1286,6 +1280,7 @@ func typeBury(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, erro
// nothing to update.
return nil, nil, nil
}
+ typ, _ := pgm.top()
returns := make(StackTypes, n)
copy(returns, pgm.stack[idx:]) // Won't have room to copy the top type
@@ -1320,11 +1315,7 @@ func typeFrameBury(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes,
return nil, nil, nil
}
- top := len(pgm.stack) - 1
- typ, ok := pgm.top()
- if !ok {
- return nil, nil, nil // Will error because fbury demands a stack arg
- }
+ typ, _ := pgm.top() // !ok is a degenerate case anyway
// If we have no frame pointer, we have to wipe out any belief that the
// stack contains anything but the supplied type.
@@ -1344,6 +1335,7 @@ func typeFrameBury(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes,
if idx < 0 {
return nil, nil, fmt.Errorf("frame_bury %d in sub with %d args", n, pgm.fp)
}
+ top := len(pgm.stack) - 1
if idx >= top {
return nil, nil, fmt.Errorf("frame_bury above stack")
}
@@ -1649,15 +1641,15 @@ func getSpec(ops *OpStream, mnemonic token, argCount int) (OpSpec, string, bool)
name := mnemonic.str
pseudoSpecs, ok := ops.versionedPseudoOps[name]
if ok {
- pseudo, ok := pseudoSpecs[argCount]
- if !ok {
+ pseudo, ok2 := pseudoSpecs[argCount]
+ if !ok2 {
// Could be that pseudoOp wants to handle immediates itself so check -1 key
- pseudo, ok = pseudoSpecs[anyImmediates]
- if !ok {
+ pseudo, ok2 = pseudoSpecs[anyImmediates]
+ if !ok2 {
// Number of immediates supplied did not match any of the pseudoOps of the given name, so we try to construct a mock spec that can be used to track types
pseudoImmediatesError(ops, mnemonic, pseudoSpecs)
- proto, version, ok := mergeProtos(pseudoSpecs)
- if !ok {
+ proto, version, ok3 := mergeProtos(pseudoSpecs)
+ if !ok3 {
return OpSpec{}, "", false
}
pseudo = OpSpec{Name: name, Proto: proto, Version: version, OpDetails: OpDetails{
@@ -1759,6 +1751,7 @@ func isFullSpec(spec OpSpec) bool {
func mergeProtos(specs map[int]OpSpec) (Proto, uint64, bool) {
var args StackTypes
var returns StackTypes
+ var debugExplainFuncPtr debugStackExplain
var minVersion uint64
i := 0
for _, spec := range specs {
@@ -1784,9 +1777,16 @@ func mergeProtos(specs map[int]OpSpec) (Proto, uint64, bool) {
}
}
}
+ if debugExplainFuncPtr == nil {
+ debugExplainFuncPtr = spec.Explain
+ }
i++
}
- return Proto{typedList{args, ""}, typedList{returns, ""}}, minVersion, true
+ return Proto{
+ Arg: typedList{args, ""},
+ Return: typedList{returns, ""},
+ Explain: debugExplainFuncPtr,
+ }, minVersion, true
}
func prepareVersionedPseudoTable(version uint64) map[string]map[int]OpSpec {
@@ -1830,18 +1830,6 @@ func errorLinef(line int, format string, a ...interface{}) *sourceError {
return &sourceError{line, 0, fmt.Errorf(format, a...)}
}
-func typecheck(expected, got StackType) bool {
- // Some ops push 'any' and we wait for run time to see what it is.
- // Some of those 'any' are based on fields that we _could_ know now but haven't written a more detailed system of typecheck for (yet).
- if expected == StackAny && got == StackNone { // Any is lenient, but stack can't be empty
- return false
- }
- if (expected == StackAny) || (got == StackAny) {
- return true
- }
- return expected == got
-}
-
type token struct {
str string
col int
@@ -2019,15 +2007,15 @@ func (ops *OpStream) trackStack(args StackTypes, returns StackTypes, instruction
// nextStatement breaks tokens into two slices at the first semicolon and expands macros along the way.
func nextStatement(ops *OpStream, tokens []token) (current, rest []token) {
for i := 0; i < len(tokens); i++ {
- token := tokens[i]
- replacement, ok := ops.macros[token.str]
+ tok := tokens[i]
+ replacement, ok := ops.macros[tok.str]
if ok {
tokens = append(tokens[0:i], append(replacement[1:], tokens[i+1:]...)...)
// backup to handle potential re-expansion of the first token in the expansion
i--
continue
}
- if token.str == ";" {
+ if tok.str == ";" {
return tokens[:i], tokens[i+1:]
}
}
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 713528b72..fa0bcc133 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -716,6 +716,14 @@ func testLine(t *testing.T, line string, ver uint64, expected string, col ...int
testProg(t, source, ver, exp(2, expected, col...))
}
+func TestAssembleArg(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ testLine(t, "arg", AssemblerMaxVersion, "arg expects 1 immediate argument", 3)
+ testLine(t, "arg x", AssemblerMaxVersion, "unable to parse argument...", 4)
+}
+
func TestAssembleTxna(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -770,6 +778,12 @@ func TestAssembleTxna(t *testing.T) {
testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "\"Accounts\" field of gtxn can only be used with 3 immediates")
testLine(t, "gtxn 0 Accounts", 1, "\"Accounts\" field of gtxn can only be used with 3 immediates")
testLine(t, "gtxn 0 Accounts 1", AssemblerMaxVersion, "")
+
+ testLine(t, "itxn 0 Accounts 1", AssemblerMaxVersion, "itxn expects 1 or 2 immediate arguments")
+ testLine(t, "gitxn 0", AssemblerMaxVersion, "gitxn expects 2 or 3 immediate arguments")
+ testLine(t, "itxn_field", 5, "itxn_field expects 1 ...")
+ testLine(t, "itxn_field ABC", 5, "itxn_field unknown field: \"ABC\"")
+ testLine(t, "itxn_field Accounts 0", 5, "itxn_field expects 1 ...")
}
func TestAssembleGlobal(t *testing.T) {
@@ -930,12 +944,15 @@ func TestAssembleBytes(t *testing.T) {
{`byte base32(MFRGGZDFMY)extrajunk`, "...must end at first closing parenthesis", 5},
{`byte base32(MFRGGZDFMY)x`, "...must end at first closing parenthesis", 5},
{`byte base32(MFRGGZDFMY`, "...lacks closing parenthesis", 5},
+ {`byte base64(YWJ#ZGVm)`, "...illegal base64 data at input byte 10", 5},
+ {`byte base64 YWJ#ZGVm`, "...illegal base64 data at input byte 3", 12},
{`byte b32 mfrggzdfmy`, "...illegal base32 data at input byte 0", 9},
{`byte b32 MFrggzdfmy`, "...illegal base32 data at input byte 2", 9},
{`byte b32(mfrggzdfmy)`, "...illegal base32 data at input byte 4", 5},
{`byte b32(MFrggzdfmy)`, "...illegal base32 data at input byte 6", 5},
{`byte base32(mfrggzdfmy)`, "...illegal base32 data at input byte 7", 5},
{`byte base32(MFrggzdfmy)`, "...illegal base32 data at input byte 9", 5},
+ {`byte 0xFFGG`, "...invalid byte...", 5},
}
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
@@ -1019,9 +1036,9 @@ func TestManualCBlocks(t *testing.T) {
testProg(t, "addr RWXCBB73XJITATVQFOI7MVUUQOL2PFDDSDUMW4H4T2SNSX4SEUOQ2MM7F4; bytecblock 0x44", 3, exp(1, "bytecblock following byte/addr/method"))
// But we can't complain precisely once backjumps are allowed, so we force
- // compile to push*. (We don't analyze the CFG, so we don't know if we can
- // use what is in the user defined block. Perhaps we could special case
- // single cblocks at start of program.
+ // compile to push*. We don't analyze the CFG, so we don't know if we can
+ // use the user defined block. Perhaps we could special case single cblocks
+ // at the start of program, or any code dominated by a particular block.
checkSame(t, 4,
"intcblock 4 5 1; int 4; int 1; +; int 5; ==",
"intcblock 4 5 1; pushint 4; pushint 1; +; pushint 5; ==")
@@ -1759,6 +1776,8 @@ func TestConstantArgs(t *testing.T) {
testProg(t, "addr x", v, exp(1, "failed to decode address x ...", 5))
testProg(t, "method", v, exp(1, "method expects 1 immediate argument", 6))
testProg(t, "method xx yy", v, exp(1, "method expects 1 immediate argument", 10))
+ testProg(t, `method "x\x"`, v, exp(1, "non-terminated escape sequence", 7))
+ testProg(t, `method xx`, v, exp(1, "unable to parse method signature", 7))
}
for v := uint64(3); v <= AssemblerMaxVersion; v++ {
testProg(t, "pushint", v, exp(1, "pushint expects 1 immediate argument"))
@@ -2327,12 +2346,12 @@ func TestStringLiteralParsing(t *testing.T) {
s = `"test\"`
result, err = parseStringLiteral(s)
- require.EqualError(t, err, "non-terminated escape seq")
+ require.EqualError(t, err, "non-terminated escape sequence")
require.Nil(t, result)
s = `"test\x\"`
result, err = parseStringLiteral(s)
- require.EqualError(t, err, "escape seq inside hex number")
+ require.EqualError(t, err, "escape sequence inside hex number")
require.Nil(t, result)
s = `"test\a"`
@@ -2342,7 +2361,7 @@ func TestStringLiteralParsing(t *testing.T) {
s = `"test\x10\x1"`
result, err = parseStringLiteral(s)
- require.EqualError(t, err, "non-terminated hex seq")
+ require.EqualError(t, err, "non-terminated hex sequence")
require.Nil(t, result)
}
@@ -2492,7 +2511,8 @@ func TestAssembleConstants(t *testing.T) {
testProg(t, "intcblock 1 2\nintc 1", v)
testLine(t, "bytec 1", v, "bytec 1 is not defined")
- testProg(t, "bytecblock 0x01 0x02\nbytec 1", v)
+ testProg(t, "bytecblock 0x01 0x02; bytec 1", v)
+ testProg(t, "bytecblock 0x1 0x02; int 1", v, exp(1, "...odd length hex string", 11))
})
}
}
@@ -2641,6 +2661,10 @@ func TestBuryAsm(t *testing.T) {
// Even when we are burying into unknown (seems repetitive, but is an easy bug)
testProg(t, "int 0; int 0; b LABEL; LABEL: int 1; int 2; int 4; bury 4; concat", AssemblerMaxVersion,
exp(1, "concat arg 1 wanted type []byte..."))
+
+ testProg(t, "intcblock 55; bury 1; int 1", AssemblerMaxVersion, exp(1, "bury 1 expects 2 stack arguments...", 14))
+ testProg(t, "intcblock 55; int 2; bury 1; int 1", AssemblerMaxVersion, exp(1, "bury 1 expects 2 stack arguments...", 21))
+ testProg(t, "int 3; int 2; bury 0; int 1", AssemblerMaxVersion, exp(1, "bury 0 always fails"))
}
func TestEqualsTypeCheck(t *testing.T) {
@@ -2916,7 +2940,9 @@ func TestMergeProtos(t *testing.T) {
aaVa := OpSpec{Proto: proto("aa:a")}
aVaa := OpSpec{Proto: proto("a:aa")}
p, _, _ := mergeProtos(map[int]OpSpec{0: iVi, 1: bVb})
- require.Equal(t, proto("a:a"), p)
+ expected := proto("a:a")
+ require.Equal(t, expected.Arg, p.Arg)
+ require.Equal(t, expected.Return, p.Return)
_, _, ok := mergeProtos(map[int]OpSpec{0: aaVa, 1: iVi})
require.False(t, ok)
_, _, ok = mergeProtos(map[int]OpSpec{0: aVaa, 1: iVi})
@@ -2924,7 +2950,9 @@ func TestMergeProtos(t *testing.T) {
medley := OpSpec{Proto: proto("aibibabai:aibibabai")}
medley2 := OpSpec{Proto: proto("biabbaiia:biabbaiia")}
p, _, _ = mergeProtos(map[int]OpSpec{0: medley, 1: medley2})
- require.Equal(t, proto("aiaabaaaa:aiaabaaaa"), p)
+ expected = proto("aiaabaaaa:aiaabaaaa")
+ require.Equal(t, expected.Arg, p.Arg)
+ require.Equal(t, expected.Return, p.Return)
v1 := OpSpec{Version: 1, Proto: proto(":")}
v2 := OpSpec{Version: 2, Proto: proto(":")}
_, v, _ := mergeProtos(map[int]OpSpec{0: v2, 1: v1})
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index 84b230bb8..0e0292ea9 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -368,7 +368,6 @@ func TestBackwardCompatGlobalFields(t *testing.T) {
ops := testProg(t, text, AssemblerMaxVersion)
ep, _, _ := makeSampleEnvWithVersion(1)
- ep.TxnGroup[0].Txn.RekeyTo = basics.Address{} // avoid min version issues
ep.TxnGroup[0].Lsig.Logic = ops.Program
_, err := EvalSignature(0, ep)
require.Error(t, err)
@@ -431,11 +430,7 @@ func TestBackwardCompatTxnFields(t *testing.T) {
require.NoError(t, err)
}
- ep, tx, _ := makeSampleEnvWithVersion(1)
- // We'll reject too early if we have a nonzero RekeyTo, because that
- // field must be zero for every txn in the group if this is an old
- // AVM version
- tx.RekeyTo = basics.Address{}
+ ep, _, _ := makeSampleEnvWithVersion(1)
ep.TxnGroup[0].Lsig.Logic = ops.Program
// check failure with version check
diff --git a/data/transactions/logic/box.go b/data/transactions/logic/box.go
index 388ae77fe..02b4d8cd2 100644
--- a/data/transactions/logic/box.go
+++ b/data/transactions/logic/box.go
@@ -105,11 +105,11 @@ func argCheck(cx *EvalContext, name string, size uint64) error {
}
func opBoxCreate(cx *EvalContext) error {
- last := len(cx.stack) - 1 // size
+ last := len(cx.Stack) - 1 // size
prev := last - 1 // name
- name := string(cx.stack[prev].Bytes)
- size := cx.stack[last].Uint
+ name := string(cx.Stack[prev].Bytes)
+ size := cx.Stack[last].Uint
err := argCheck(cx, name, size)
if err != nil {
@@ -127,19 +127,19 @@ func opBoxCreate(cx *EvalContext) error {
}
}
- cx.stack[prev] = boolToSV(!exists)
- cx.stack = cx.stack[:last]
+ cx.Stack[prev] = boolToSV(!exists)
+ cx.Stack = cx.Stack[:last]
return err
}
func opBoxExtract(cx *EvalContext) error {
- last := len(cx.stack) - 1 // length
+ last := len(cx.Stack) - 1 // length
prev := last - 1 // start
pprev := prev - 1 // name
- name := string(cx.stack[pprev].Bytes)
- start := cx.stack[prev].Uint
- length := cx.stack[last].Uint
+ name := string(cx.Stack[pprev].Bytes)
+ start := cx.Stack[prev].Uint
+ length := cx.Stack[last].Uint
err := argCheck(cx, name, basics.AddSaturate(start, length))
if err != nil {
@@ -154,19 +154,19 @@ func opBoxExtract(cx *EvalContext) error {
}
bytes, err := extractCarefully(contents, start, length)
- cx.stack[pprev].Bytes = bytes
- cx.stack = cx.stack[:prev]
+ cx.Stack[pprev].Bytes = bytes
+ cx.Stack = cx.Stack[:prev]
return err
}
func opBoxReplace(cx *EvalContext) error {
- last := len(cx.stack) - 1 // replacement
+ last := len(cx.Stack) - 1 // replacement
prev := last - 1 // start
pprev := prev - 1 // name
- replacement := cx.stack[last].Bytes
- start := cx.stack[prev].Uint
- name := string(cx.stack[pprev].Bytes)
+ replacement := cx.Stack[last].Bytes
+ start := cx.Stack[prev].Uint
+ name := string(cx.Stack[pprev].Bytes)
err := argCheck(cx, name, basics.AddSaturate(start, uint64(len(replacement))))
if err != nil {
@@ -185,13 +185,13 @@ func opBoxReplace(cx *EvalContext) error {
if err != nil {
return err
}
- cx.stack = cx.stack[:pprev]
+ cx.Stack = cx.Stack[:pprev]
return cx.Ledger.SetBox(cx.appID, name, bytes)
}
func opBoxDel(cx *EvalContext) error {
- last := len(cx.stack) - 1 // name
- name := string(cx.stack[last].Bytes)
+ last := len(cx.Stack) - 1 // name
+ name := string(cx.Stack[last].Bytes)
err := argCheck(cx, name, 0)
if err != nil {
@@ -208,13 +208,13 @@ func opBoxDel(cx *EvalContext) error {
return err
}
}
- cx.stack[last] = boolToSV(exists)
+ cx.Stack[last] = boolToSV(exists)
return nil
}
func opBoxLen(cx *EvalContext) error {
- last := len(cx.stack) - 1 // name
- name := string(cx.stack[last].Bytes)
+ last := len(cx.Stack) - 1 // name
+ name := string(cx.Stack[last].Bytes)
err := argCheck(cx, name, 0)
if err != nil {
@@ -225,14 +225,14 @@ func opBoxLen(cx *EvalContext) error {
return err
}
- cx.stack[last] = stackValue{Uint: uint64(len(contents))}
- cx.stack = append(cx.stack, boolToSV(exists))
+ cx.Stack[last] = stackValue{Uint: uint64(len(contents))}
+ cx.Stack = append(cx.Stack, boolToSV(exists))
return nil
}
func opBoxGet(cx *EvalContext) error {
- last := len(cx.stack) - 1 // name
- name := string(cx.stack[last].Bytes)
+ last := len(cx.Stack) - 1 // name
+ name := string(cx.Stack[last].Bytes)
err := argCheck(cx, name, 0)
if err != nil {
@@ -245,17 +245,17 @@ func opBoxGet(cx *EvalContext) error {
if !exists {
contents = []byte{}
}
- cx.stack[last].Bytes = contents // Will rightly panic if too big
- cx.stack = append(cx.stack, boolToSV(exists))
+ cx.Stack[last].Bytes = contents // Will rightly panic if too big
+ cx.Stack = append(cx.Stack, boolToSV(exists))
return nil
}
func opBoxPut(cx *EvalContext) error {
- last := len(cx.stack) - 1 // value
+ last := len(cx.Stack) - 1 // value
prev := last - 1 // name
- value := cx.stack[last].Bytes
- name := string(cx.stack[prev].Bytes)
+ value := cx.Stack[last].Bytes
+ name := string(cx.Stack[prev].Bytes)
err := argCheck(cx, name, uint64(len(value)))
if err != nil {
@@ -268,7 +268,7 @@ func opBoxPut(cx *EvalContext) error {
return err
}
- cx.stack = cx.stack[:prev]
+ cx.Stack = cx.Stack[:prev]
if exists {
/* the replacement must match existing size */
diff --git a/data/transactions/logic/debugger.go b/data/transactions/logic/debugger.go
index bc10ca79d..eda022e72 100644
--- a/data/transactions/logic/debugger.go
+++ b/data/transactions/logic/debugger.go
@@ -177,7 +177,7 @@ func makeDebugState(cx *EvalContext) *DebugState {
globals := make([]basics.TealValue, len(globalFieldSpecs))
for _, fs := range globalFieldSpecs {
// Don't try to grab app only fields when evaluating a signature
- if (cx.runModeFlags&ModeSig) != 0 && fs.mode == ModeApp {
+ if cx.runMode == ModeSig && fs.mode == ModeApp {
continue
}
sv, err := cx.globalFieldToValue(fs)
@@ -188,7 +188,7 @@ func makeDebugState(cx *EvalContext) *DebugState {
}
ds.Globals = globals
- if (cx.runModeFlags & ModeApp) != 0 {
+ if cx.runMode == ModeApp {
ds.EvalDelta = cx.txn.EvalDelta
}
@@ -285,13 +285,13 @@ func (a *debuggerEvalTracerAdaptor) refreshDebugState(cx *EvalContext, evalError
ds.Error = evalError.Error()
}
- stack := make([]basics.TealValue, len(cx.stack))
- for i, sv := range cx.stack {
+ stack := make([]basics.TealValue, len(cx.Stack))
+ for i, sv := range cx.Stack {
stack[i] = sv.toEncodedTealValue()
}
- scratch := make([]basics.TealValue, len(cx.scratch))
- for i, sv := range cx.scratch {
+ scratch := make([]basics.TealValue, len(cx.Scratch))
+ for i, sv := range cx.Scratch {
scratch[i] = sv.toEncodedTealValue()
}
@@ -300,7 +300,7 @@ func (a *debuggerEvalTracerAdaptor) refreshDebugState(cx *EvalContext, evalError
ds.OpcodeBudget = cx.remainingBudget()
ds.CallStack = ds.parseCallstack(cx.callstack)
- if (cx.runModeFlags & ModeApp) != 0 {
+ if cx.runMode == ModeApp {
ds.EvalDelta = cx.txn.EvalDelta
}
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index e4e524880..dc9dcf7b6 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -388,7 +388,7 @@ var opDocExtras = map[string]string{
"itxn_submit": "`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.",
"base64_decode": "*Warning*: Usage should be restricted to very rare use cases. In almost all cases, smart contracts should directly handle non-encoded byte-strings. This opcode should only be used in cases where base64 is the only available option, e.g. interoperability with a third-party that only signs base64 strings.\n\n Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.",
- "json_ref": "*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.\n\nAlmost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004). This opcode should only be used in cases where JSON is only available option, e.g. when a third-party only signs JSON.",
+ "json_ref": "*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.\n\nAlmost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004). This opcode should only be used in cases where JSON is the only available option, e.g. when a third-party only signs JSON.",
"match": "`match` consumes N+1 values from the stack. Let the top stack value be B. The following N values represent an ordered list of match cases/constants (A), where the first value (A[0]) is the deepest in the stack. The immediate arguments are an ordered list of N labels (T). `match` will branch to target T[I], where A[I] = B. If there are no matches then execution continues on to the next instruction.",
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index 4fe94f80f..3d64c5fbc 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -35,6 +35,7 @@ import (
"strings"
"golang.org/x/crypto/sha3"
+ "golang.org/x/exp/slices"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -60,7 +61,7 @@ const maxStringSize = 4096
const maxByteMathSize = 64
// maxLogSize is the limit of total log size from n log calls in a program
-const maxLogSize = 1024
+const maxLogSize = config.MaxEvalDeltaTotalLogSize
// maxLogCalls is the limit of total log calls during a program execution
const maxLogCalls = 32
@@ -160,7 +161,8 @@ func (sv stackValue) string(limit int) (string, error) {
return string(sv.Bytes), nil
}
-func (sv stackValue) toTealValue() basics.TealValue {
+// ToTealValue converts a stack value instance into a basics.TealValue instance
+func (sv stackValue) ToTealValue() basics.TealValue {
if sv.avmType() == avmBytes {
return basics.TealValue{Type: basics.TealBytesType, Bytes: string(sv.Bytes)}
}
@@ -569,8 +571,8 @@ type scratchSpace [256]stackValue
type EvalContext struct {
*EvalParams
- // determines eval mode: runModeSignature or runModeApplication
- runModeFlags RunMode
+ // determines eval mode: ModeSig or ModeApp
+ runMode RunMode
// the index of the transaction being evaluated
groupIndex int
@@ -586,7 +588,7 @@ type EvalContext struct {
// keeping the running changes, the debugger can be changed to display them
// as the app runs.
- stack []stackValue
+ Stack []stackValue
callstack []frame
fromCallsub bool
@@ -597,7 +599,7 @@ type EvalContext struct {
intc []uint64
bytec [][]byte
version uint64
- scratch scratchSpace
+ Scratch scratchSpace
subtxns []transactions.SignedTxnWithAD // place to build for itxn_submit
cost int // cost incurred so far
@@ -623,9 +625,18 @@ func (cx *EvalContext) GroupIndex() int {
// RunMode returns the evaluation context's mode (signature or application)
func (cx *EvalContext) RunMode() RunMode {
- return cx.runModeFlags
+ return cx.runMode
}
+// PC returns the program counter of the current application being evaluated
+func (cx *EvalContext) PC() int { return cx.pc }
+
+// GetOpSpec queries for the OpSpec w.r.t. current program byte.
+func (cx *EvalContext) GetOpSpec() OpSpec { return opsByOpcode[cx.version][cx.program[cx.pc]] }
+
+// GetProgram queries for the current program
+func (cx *EvalContext) GetProgram() []byte { return cx.program }
+
// avmType describes the type of a value on the operand stack
// avmTypes are a subset of StackTypes
type avmType byte
@@ -658,23 +669,6 @@ func (at avmType) String() string {
return "internal error, unknown type"
}
-// stackType lifts the avmType to a StackType
-// it can do this because the base StackTypes
-// are a superset of avmType
-func (at avmType) stackType() StackType {
- switch at {
- case avmNone:
- return StackNone
- case avmAny:
- return StackAny
- case avmUint64:
- return StackUint64
- case avmBytes:
- return StackBytes
- }
- return StackNone
-}
-
var (
// StackUint64 is any valid uint64
StackUint64 = NewStackType(avmUint64, bound(0, math.MaxUint64))
@@ -858,29 +852,6 @@ func (st StackType) Typed() bool {
// StackTypes is an alias for a list of StackType with syntactic sugar
type StackTypes []StackType
-// Reversed returns the StackTypes in reverse order
-// useful for displaying the stack as an op sees it
-func (st StackTypes) Reversed() StackTypes {
- nst := make(StackTypes, len(st))
- for idx := 0; idx < len(st); idx++ {
- nst[idx] = st[len(st)-1-idx]
- }
- return nst
-}
-
-func (st StackTypes) String() string {
- // Note this reverses the stack so top appears first
- return fmt.Sprintf("(%s)", strings.Join(st.strings(), ", "))
-}
-
-func (st StackTypes) strings() []string {
- var strs = make([]string, len(st))
- for idx, s := range st {
- strs[idx] = s.String()
- }
- return strs
-}
-
func parseStackTypes(spec string) StackTypes {
if spec == "" {
return nil
@@ -896,6 +867,16 @@ func parseStackTypes(spec string) StackTypes {
return types
}
+func filterNoneTypes(sts StackTypes) StackTypes {
+ var filteredSts = make(StackTypes, 0, len(sts))
+ for i := range sts {
+ if sts[i].AVMType != avmNone {
+ filteredSts = append(filteredSts, sts[i])
+ }
+ }
+ return filteredSts
+}
+
// panicError wraps a recover() catching a panic()
type panicError struct {
PanicValue interface{}
@@ -947,11 +928,11 @@ func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParam
return false, nil, errors.New("0 appId in contract eval")
}
cx := EvalContext{
- EvalParams: params,
- runModeFlags: ModeApp,
- groupIndex: gi,
- txn: &params.TxnGroup[gi],
- appID: aid,
+ EvalParams: params,
+ runMode: ModeApp,
+ groupIndex: gi,
+ txn: &params.TxnGroup[gi],
+ appID: aid,
}
if cx.Proto.IsolateClearState && cx.txn.Txn.OnCompletion == transactions.ClearStateOC {
@@ -1031,7 +1012,7 @@ func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParam
// Save scratch for `gload`. We used to copy, but cx.scratch is quite large,
// about 8k, and caused measurable CPU and memory demands. Of course, these
// should never be changed by later transactions.
- cx.pastScratch[cx.groupIndex] = &cx.scratch
+ cx.pastScratch[cx.groupIndex] = &cx.Scratch
return pass, &cx, err
}
@@ -1050,10 +1031,10 @@ func EvalSignatureFull(gi int, params *EvalParams) (bool, *EvalContext, error) {
return false, nil, errors.New("no sig ledger in signature eval")
}
cx := EvalContext{
- EvalParams: params,
- runModeFlags: ModeSig,
- groupIndex: gi,
- txn: &params.TxnGroup[gi],
+ EvalParams: params,
+ runMode: ModeSig,
+ groupIndex: gi,
+ txn: &params.TxnGroup[gi],
}
pass, err := eval(cx.txn.Lsig.Logic, &cx)
@@ -1100,7 +1081,7 @@ func eval(program []byte, cx *EvalContext) (pass bool, err error) {
cx.pc = vlen
// 16 is chosen to avoid growth for small programs, and so that repeated
// doublings lead to a number just a bit above 1000, the max stack height.
- cx.stack = make([]stackValue, 0, 16)
+ cx.Stack = make([]stackValue, 0, 16)
cx.program = program
cx.txn.EvalDelta.GlobalDelta = basics.StateDelta{}
cx.txn.EvalDelta.LocalDeltas = make(map[uint64]basics.StateDelta)
@@ -1155,20 +1136,20 @@ func eval(program []byte, cx *EvalContext) (pass bool, err error) {
return false, err
}
- if len(cx.stack) != 1 {
+ if len(cx.Stack) != 1 {
if cx.Trace != nil {
fmt.Fprintf(cx.Trace, "end stack:\n")
- for i, sv := range cx.stack {
+ for i, sv := range cx.Stack {
fmt.Fprintf(cx.Trace, "[%d] %s\n", i, sv)
}
}
- return false, fmt.Errorf("stack len is %d instead of 1", len(cx.stack))
+ return false, fmt.Errorf("stack len is %d instead of 1", len(cx.Stack))
}
- if cx.stack[0].Bytes != nil {
+ if cx.Stack[0].Bytes != nil {
return false, errors.New("stack finished with bytes not int")
}
- return cx.stack[0].Uint != 0, nil
+ return cx.Stack[0].Uint != 0, nil
}
// CheckContract should be faster than EvalContract. It can perform
@@ -1212,7 +1193,7 @@ func check(program []byte, params *EvalParams, mode RunMode) (err error) {
cx.version = version
cx.pc = vlen
cx.EvalParams = params
- cx.runModeFlags = mode
+ cx.runMode = mode
cx.program = program
cx.branchTargets = make([]bool, len(program)+1) // teal v2 allowed jumping to the end of the prog
cx.instructionStarts = make([]bool, len(program)+1)
@@ -1298,7 +1279,7 @@ func (cx *EvalContext) AppID() basics.AppIndex {
}
func (cx *EvalContext) remainingBudget() int {
- if cx.runModeFlags == ModeSig {
+ if cx.runMode == ModeSig {
return int(cx.Proto.LogicSigMaxCost) - cx.cost
}
@@ -1336,18 +1317,18 @@ func (cx *EvalContext) step() error {
if spec.op == nil {
return fmt.Errorf("%3d illegal opcode 0x%02x", cx.pc, opcode)
}
- if (cx.runModeFlags & spec.Modes) == 0 {
+ if (cx.runMode & spec.Modes) == 0 {
return fmt.Errorf("%s not allowed in current mode", spec.Name)
}
// check args for stack underflow and types
- if len(cx.stack) < len(spec.Arg.Types) {
+ if len(cx.Stack) < len(spec.Arg.Types) {
return fmt.Errorf("stack underflow in %s", spec.Name)
}
- first := len(cx.stack) - len(spec.Arg.Types)
+ first := len(cx.Stack) - len(spec.Arg.Types)
for i, argType := range spec.Arg.Types {
- if !opCompat(argType.AVMType, cx.stack[first+i].avmType()) {
- return fmt.Errorf("%s arg %d wanted %s but got %s", spec.Name, i, argType, cx.stack[first+i].typeName())
+ if !opCompat(argType.AVMType, cx.Stack[first+i].avmType()) {
+ return fmt.Errorf("%s arg %d wanted %s but got %s", spec.Name, i, argType, cx.Stack[first+i].typeName())
}
}
@@ -1359,9 +1340,9 @@ func (cx *EvalContext) step() error {
// It's something like a 5-10% overhead on our simplest instructions to make
// the Cost() call without the FullCost.compute() short-circuit, even
// though Cost() tries to exit fast. Use BenchmarkUintMath to test changes.
- opcost := deets.FullCost.compute(cx.stack)
+ opcost := deets.FullCost.compute(cx.Stack)
if opcost <= 0 {
- opcost = deets.Cost(cx.program, cx.pc, cx.stack)
+ opcost = deets.Cost(cx.program, cx.pc, cx.Stack)
if opcost <= 0 {
return fmt.Errorf("%3d %s returned 0 cost", cx.pc, spec.Name)
}
@@ -1383,26 +1364,26 @@ func (cx *EvalContext) step() error {
cx.pc, spec.Name, cx.cost)
}
- preheight := len(cx.stack)
+ preheight := len(cx.Stack)
err := spec.op(cx)
if err == nil && !spec.trusted {
- postheight := len(cx.stack)
+ postheight := len(cx.Stack)
if postheight-preheight != len(spec.Return.Types)-len(spec.Arg.Types) && !spec.AlwaysExits() {
return fmt.Errorf("%s changed stack height improperly %d != %d",
spec.Name, postheight-preheight, len(spec.Return.Types)-len(spec.Arg.Types))
}
first = postheight - len(spec.Return.Types)
for i, argType := range spec.Return.Types {
- stackType := cx.stack[first+i].avmType()
+ stackType := cx.Stack[first+i].avmType()
if !opCompat(argType.AVMType, stackType) {
if spec.AlwaysExits() { // We test in the loop because it's the uncommon case.
break
}
- return fmt.Errorf("%s produced %s but intended %s", spec.Name, cx.stack[first+i].typeName(), argType)
+ return fmt.Errorf("%s produced %s but intended %s", spec.Name, cx.Stack[first+i].typeName(), argType)
}
- if stackType == avmBytes && len(cx.stack[first+i].Bytes) > maxStringSize {
- return fmt.Errorf("%s produced a too big (%d) byte-array", spec.Name, len(cx.stack[first+i].Bytes))
+ if stackType == avmBytes && len(cx.Stack[first+i].Bytes) > maxStringSize {
+ return fmt.Errorf("%s produced a too big (%d) byte-array", spec.Name, len(cx.Stack[first+i].Bytes))
}
}
}
@@ -1433,7 +1414,7 @@ func (cx *EvalContext) step() error {
return inner
}
var stackString string
- if len(cx.stack) == 0 {
+ if len(cx.Stack) == 0 {
stackString = "<empty stack>"
} else {
num := 1
@@ -1443,11 +1424,11 @@ func (cx *EvalContext) step() error {
// check for nil error here, because we might not return
// values if we encounter an error in the opcode
if err == nil {
- if len(cx.stack) < num {
- return fmt.Errorf("stack underflow: expected %d, have %d", num, len(cx.stack))
+ if len(cx.Stack) < num {
+ return fmt.Errorf("stack underflow: expected %d, have %d", num, len(cx.Stack))
}
for i := 1; i <= num; i++ {
- stackString += fmt.Sprintf("(%s) ", cx.stack[len(cx.stack)-i])
+ stackString += fmt.Sprintf("(%s) ", cx.Stack[len(cx.Stack)-i])
}
}
}
@@ -1458,7 +1439,7 @@ func (cx *EvalContext) step() error {
return err
}
- if len(cx.stack) > maxStackDepth {
+ if len(cx.Stack) > maxStackDepth {
return errors.New("stack overflow")
}
if cx.nextpc != 0 {
@@ -1485,7 +1466,7 @@ func (cx *EvalContext) checkStep() (int, error) {
if spec.op == nil {
return 0, fmt.Errorf("illegal opcode 0x%02x", opcode)
}
- if (cx.runModeFlags & spec.Modes) == 0 {
+ if (cx.runMode & spec.Modes) == 0 {
return 0, fmt.Errorf("%s not allowed in current mode", spec.Name)
}
deets := spec.OpDetails
@@ -1523,11 +1504,11 @@ func (cx *EvalContext) checkStep() (int, error) {
}
func (cx *EvalContext) ensureStackCap(targetCap int) {
- if cap(cx.stack) < targetCap {
+ if cap(cx.Stack) < targetCap {
// Let's grow all at once, plus a little slack.
- newStack := make([]stackValue, len(cx.stack), targetCap+4)
- copy(newStack, cx.stack)
- cx.stack = newStack
+ newStack := make([]stackValue, len(cx.Stack), targetCap+4)
+ copy(newStack, cx.Stack)
+ cx.Stack = newStack
}
}
@@ -1539,64 +1520,64 @@ func opReturn(cx *EvalContext) error {
// Achieve the end condition:
// Take the last element on the stack and make it the return value (only element on the stack)
// Move the pc to the end of the program
- last := len(cx.stack) - 1
- cx.stack[0] = cx.stack[last]
- cx.stack = cx.stack[:1]
+ last := len(cx.Stack) - 1
+ cx.Stack[0] = cx.Stack[last]
+ cx.Stack = cx.Stack[:1]
cx.nextpc = len(cx.program)
return nil
}
func opAssert(cx *EvalContext) error {
- last := len(cx.stack) - 1
- if cx.stack[last].Uint != 0 {
- cx.stack = cx.stack[:last]
+ last := len(cx.Stack) - 1
+ if cx.Stack[last].Uint != 0 {
+ cx.Stack = cx.Stack[:last]
return nil
}
return fmt.Errorf("assert failed pc=%d", cx.pc)
}
func opSwap(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- cx.stack[last], cx.stack[prev] = cx.stack[prev], cx.stack[last]
+ cx.Stack[last], cx.Stack[prev] = cx.Stack[prev], cx.Stack[last]
return nil
}
func opSelect(cx *EvalContext) error {
- last := len(cx.stack) - 1 // condition on top
+ last := len(cx.Stack) - 1 // condition on top
prev := last - 1 // true is one down
pprev := prev - 1 // false below that
- if cx.stack[last].Uint != 0 {
- cx.stack[pprev] = cx.stack[prev]
+ if cx.Stack[last].Uint != 0 {
+ cx.Stack[pprev] = cx.Stack[prev]
}
- cx.stack = cx.stack[:prev]
+ cx.Stack = cx.Stack[:prev]
return nil
}
func opSHA256(cx *EvalContext) error {
- last := len(cx.stack) - 1
- hash := sha256.Sum256(cx.stack[last].Bytes)
- cx.stack[last].Bytes = hash[:]
+ last := len(cx.Stack) - 1
+ hash := sha256.Sum256(cx.Stack[last].Bytes)
+ cx.Stack[last].Bytes = hash[:]
return nil
}
// The NIST SHA3-256 is implemented for compatibility with ICON
func opSHA3_256(cx *EvalContext) error {
- last := len(cx.stack) - 1
- hash := sha3.Sum256(cx.stack[last].Bytes)
- cx.stack[last].Bytes = hash[:]
+ last := len(cx.Stack) - 1
+ hash := sha3.Sum256(cx.Stack[last].Bytes)
+ cx.Stack[last].Bytes = hash[:]
return nil
}
// The Keccak256 variant of SHA-3 is implemented for compatibility with Ethereum
func opKeccak256(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
hasher := sha3.NewLegacyKeccak256()
- hasher.Write(cx.stack[last].Bytes)
+ hasher.Write(cx.Stack[last].Bytes)
hv := make([]byte, 0, hasher.Size())
hv = hasher.Sum(hv)
- cx.stack[last].Bytes = hv
+ cx.Stack[last].Bytes = hv
return nil
}
@@ -1607,30 +1588,30 @@ func opKeccak256(cx *EvalContext) error {
// to a different default hash. For stability of this language, at
// that time a new opcode should be made with the new hash.
func opSHA512_256(cx *EvalContext) error {
- last := len(cx.stack) - 1
- hash := sha512.Sum512_256(cx.stack[last].Bytes)
- cx.stack[last].Bytes = hash[:]
+ last := len(cx.Stack) - 1
+ hash := sha512.Sum512_256(cx.Stack[last].Bytes)
+ cx.Stack[last].Bytes = hash[:]
return nil
}
func opPlus(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- sum, carry := bits.Add64(cx.stack[prev].Uint, cx.stack[last].Uint, 0)
+ sum, carry := bits.Add64(cx.Stack[prev].Uint, cx.Stack[last].Uint, 0)
if carry > 0 {
return errors.New("+ overflowed")
}
- cx.stack[prev].Uint = sum
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = sum
+ cx.Stack = cx.Stack[:last]
return nil
}
func opAddw(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- sum, carry := bits.Add64(cx.stack[prev].Uint, cx.stack[last].Uint, 0)
- cx.stack[prev].Uint = carry
- cx.stack[last].Uint = sum
+ sum, carry := bits.Add64(cx.Stack[prev].Uint, cx.Stack[last].Uint, 0)
+ cx.Stack[prev].Uint = carry
+ cx.Stack[last].Uint = sum
return nil
}
@@ -1653,83 +1634,83 @@ func opDivModwImpl(hiNum, loNum, hiDen, loDen uint64) (hiQuo uint64, loQuo uint6
}
func opDivModw(cx *EvalContext) error {
- loDen := len(cx.stack) - 1
+ loDen := len(cx.Stack) - 1
hiDen := loDen - 1
- if cx.stack[loDen].Uint == 0 && cx.stack[hiDen].Uint == 0 {
+ if cx.Stack[loDen].Uint == 0 && cx.Stack[hiDen].Uint == 0 {
return errors.New("/ 0")
}
loNum := loDen - 2
hiNum := loDen - 3
hiQuo, loQuo, hiRem, loRem :=
- opDivModwImpl(cx.stack[hiNum].Uint, cx.stack[loNum].Uint, cx.stack[hiDen].Uint, cx.stack[loDen].Uint)
- cx.stack[hiNum].Uint = hiQuo
- cx.stack[loNum].Uint = loQuo
- cx.stack[hiDen].Uint = hiRem
- cx.stack[loDen].Uint = loRem
+ opDivModwImpl(cx.Stack[hiNum].Uint, cx.Stack[loNum].Uint, cx.Stack[hiDen].Uint, cx.Stack[loDen].Uint)
+ cx.Stack[hiNum].Uint = hiQuo
+ cx.Stack[loNum].Uint = loQuo
+ cx.Stack[hiDen].Uint = hiRem
+ cx.Stack[loDen].Uint = loRem
return nil
}
func opMinus(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- if cx.stack[last].Uint > cx.stack[prev].Uint {
+ if cx.Stack[last].Uint > cx.Stack[prev].Uint {
return errors.New("- would result negative")
}
- cx.stack[prev].Uint -= cx.stack[last].Uint
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint -= cx.Stack[last].Uint
+ cx.Stack = cx.Stack[:last]
return nil
}
func opDiv(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- if cx.stack[last].Uint == 0 {
+ if cx.Stack[last].Uint == 0 {
return errors.New("/ 0")
}
- cx.stack[prev].Uint /= cx.stack[last].Uint
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint /= cx.Stack[last].Uint
+ cx.Stack = cx.Stack[:last]
return nil
}
func opModulo(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- if cx.stack[last].Uint == 0 {
+ if cx.Stack[last].Uint == 0 {
return errors.New("% 0")
}
- cx.stack[prev].Uint = cx.stack[prev].Uint % cx.stack[last].Uint
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = cx.Stack[prev].Uint % cx.Stack[last].Uint
+ cx.Stack = cx.Stack[:last]
return nil
}
func opMul(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- high, low := bits.Mul64(cx.stack[prev].Uint, cx.stack[last].Uint)
+ high, low := bits.Mul64(cx.Stack[prev].Uint, cx.Stack[last].Uint)
if high > 0 {
return errors.New("* overflowed")
}
- cx.stack[prev].Uint = low
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = low
+ cx.Stack = cx.Stack[:last]
return nil
}
func opMulw(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- high, low := bits.Mul64(cx.stack[prev].Uint, cx.stack[last].Uint)
- cx.stack[prev].Uint = high
- cx.stack[last].Uint = low
+ high, low := bits.Mul64(cx.Stack[prev].Uint, cx.Stack[last].Uint)
+ cx.Stack[prev].Uint = high
+ cx.Stack[last].Uint = low
return nil
}
func opDivw(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
pprev := last - 2
- hi := cx.stack[pprev].Uint
- lo := cx.stack[prev].Uint
- y := cx.stack[last].Uint
+ hi := cx.Stack[pprev].Uint
+ lo := cx.Stack[prev].Uint
+ y := cx.Stack[last].Uint
// These two clauses catch what will cause panics in bits.Div64, so we get
// nicer errors.
if y == 0 {
@@ -1739,17 +1720,17 @@ func opDivw(cx *EvalContext) error {
return fmt.Errorf("divw overflow: %d <= %d", y, hi)
}
quo, _ := bits.Div64(hi, lo, y)
- cx.stack = cx.stack[:prev] // pop 2
- cx.stack[pprev].Uint = quo
+ cx.Stack = cx.Stack[:prev] // pop 2
+ cx.Stack[pprev].Uint = quo
return nil
}
func opLt(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- cond := cx.stack[prev].Uint < cx.stack[last].Uint
- cx.stack[prev] = boolToSV(cond)
- cx.stack = cx.stack[:last]
+ cond := cx.Stack[prev].Uint < cx.Stack[last].Uint
+ cx.Stack[prev] = boolToSV(cond)
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -1771,39 +1752,39 @@ func opGe(cx *EvalContext) error {
}
func opAnd(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- cond := (cx.stack[prev].Uint != 0) && (cx.stack[last].Uint != 0)
- cx.stack[prev] = boolToSV(cond)
- cx.stack = cx.stack[:last]
+ cond := (cx.Stack[prev].Uint != 0) && (cx.Stack[last].Uint != 0)
+ cx.Stack[prev] = boolToSV(cond)
+ cx.Stack = cx.Stack[:last]
return nil
}
func opOr(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- cond := (cx.stack[prev].Uint != 0) || (cx.stack[last].Uint != 0)
- cx.stack[prev] = boolToSV(cond)
- cx.stack = cx.stack[:last]
+ cond := (cx.Stack[prev].Uint != 0) || (cx.Stack[last].Uint != 0)
+ cx.Stack[prev] = boolToSV(cond)
+ cx.Stack = cx.Stack[:last]
return nil
}
func opEq(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- ta := cx.stack[prev].avmType()
- tb := cx.stack[last].avmType()
+ ta := cx.Stack[prev].avmType()
+ tb := cx.Stack[last].avmType()
if ta != tb {
- return fmt.Errorf("cannot compare (%s to %s)", cx.stack[prev].typeName(), cx.stack[last].typeName())
+ return fmt.Errorf("cannot compare (%s to %s)", cx.Stack[prev].typeName(), cx.Stack[last].typeName())
}
var cond bool
if ta == avmBytes {
- cond = bytes.Equal(cx.stack[prev].Bytes, cx.stack[last].Bytes)
+ cond = bytes.Equal(cx.Stack[prev].Bytes, cx.Stack[last].Bytes)
} else {
- cond = cx.stack[prev].Uint == cx.stack[last].Uint
+ cond = cx.Stack[prev].Uint == cx.Stack[last].Uint
}
- cx.stack[prev] = boolToSV(cond)
- cx.stack = cx.stack[:last]
+ cx.Stack[prev] = boolToSV(cond)
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -1816,31 +1797,31 @@ func opNeq(cx *EvalContext) error {
}
func opNot(cx *EvalContext) error {
- last := len(cx.stack) - 1
- cx.stack[last] = boolToSV(cx.stack[last].Uint == 0)
+ last := len(cx.Stack) - 1
+ cx.Stack[last] = boolToSV(cx.Stack[last].Uint == 0)
return nil
}
func opLen(cx *EvalContext) error {
- last := len(cx.stack) - 1
- cx.stack[last].Uint = uint64(len(cx.stack[last].Bytes))
- cx.stack[last].Bytes = nil
+ last := len(cx.Stack) - 1
+ cx.Stack[last].Uint = uint64(len(cx.Stack[last].Bytes))
+ cx.Stack[last].Bytes = nil
return nil
}
func opItob(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
ibytes := make([]byte, 8)
- binary.BigEndian.PutUint64(ibytes, cx.stack[last].Uint)
+ binary.BigEndian.PutUint64(ibytes, cx.Stack[last].Uint)
// cx.stack[last].Uint is not cleared out as optimization
// stackValue.avmType() checks Bytes field first
- cx.stack[last].Bytes = ibytes
+ cx.Stack[last].Bytes = ibytes
return nil
}
func opBtoi(cx *EvalContext) error {
- last := len(cx.stack) - 1
- ibytes := cx.stack[last].Bytes
+ last := len(cx.Stack) - 1
+ ibytes := cx.Stack[last].Bytes
if len(ibytes) > 8 {
return fmt.Errorf("btoi arg too long, got [%d]bytes", len(ibytes))
}
@@ -1849,60 +1830,60 @@ func opBtoi(cx *EvalContext) error {
value = value << 8
value = value | (uint64(b) & 0x0ff)
}
- cx.stack[last].Uint = value
- cx.stack[last].Bytes = nil
+ cx.Stack[last].Uint = value
+ cx.Stack[last].Bytes = nil
return nil
}
func opBitOr(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- cx.stack[prev].Uint = cx.stack[prev].Uint | cx.stack[last].Uint
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = cx.Stack[prev].Uint | cx.Stack[last].Uint
+ cx.Stack = cx.Stack[:last]
return nil
}
func opBitAnd(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- cx.stack[prev].Uint = cx.stack[prev].Uint & cx.stack[last].Uint
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = cx.Stack[prev].Uint & cx.Stack[last].Uint
+ cx.Stack = cx.Stack[:last]
return nil
}
func opBitXor(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- cx.stack[prev].Uint = cx.stack[prev].Uint ^ cx.stack[last].Uint
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = cx.Stack[prev].Uint ^ cx.Stack[last].Uint
+ cx.Stack = cx.Stack[:last]
return nil
}
func opBitNot(cx *EvalContext) error {
- last := len(cx.stack) - 1
- cx.stack[last].Uint = cx.stack[last].Uint ^ 0xffffffffffffffff
+ last := len(cx.Stack) - 1
+ cx.Stack[last].Uint = cx.Stack[last].Uint ^ 0xffffffffffffffff
return nil
}
func opShiftLeft(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- if cx.stack[last].Uint > 63 {
- return fmt.Errorf("shl arg too big, (%d)", cx.stack[last].Uint)
+ if cx.Stack[last].Uint > 63 {
+ return fmt.Errorf("shl arg too big, (%d)", cx.Stack[last].Uint)
}
- cx.stack[prev].Uint = cx.stack[prev].Uint << cx.stack[last].Uint
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = cx.Stack[prev].Uint << cx.Stack[last].Uint
+ cx.Stack = cx.Stack[:last]
return nil
}
func opShiftRight(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- if cx.stack[last].Uint > 63 {
- return fmt.Errorf("shr arg too big, (%d)", cx.stack[last].Uint)
+ if cx.Stack[last].Uint > 63 {
+ return fmt.Errorf("shr arg too big, (%d)", cx.Stack[last].Uint)
}
- cx.stack[prev].Uint = cx.stack[prev].Uint >> cx.stack[last].Uint
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = cx.Stack[prev].Uint >> cx.Stack[last].Uint
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -1915,9 +1896,9 @@ func opSqrt(cx *EvalContext) error {
http://www.embedded.com/electronics-blogs/programmer-s-toolbox/4219659/Integer-Square-Roots
*/
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
- sq := cx.stack[last].Uint
+ sq := cx.Stack[last].Uint
var rem uint64 = 0
var root uint64 = 0
@@ -1930,27 +1911,27 @@ func opSqrt(cx *EvalContext) error {
root += 2
}
}
- cx.stack[last].Uint = root >> 1
+ cx.Stack[last].Uint = root >> 1
return nil
}
func opBitLen(cx *EvalContext) error {
- last := len(cx.stack) - 1
- if cx.stack[last].avmType() == avmUint64 {
- cx.stack[last].Uint = uint64(bits.Len64(cx.stack[last].Uint))
+ last := len(cx.Stack) - 1
+ if cx.Stack[last].avmType() == avmUint64 {
+ cx.Stack[last].Uint = uint64(bits.Len64(cx.Stack[last].Uint))
return nil
}
- length := len(cx.stack[last].Bytes)
+ length := len(cx.Stack[last].Bytes)
idx := 0
- for i, b := range cx.stack[last].Bytes {
+ for i, b := range cx.Stack[last].Bytes {
if b != 0 {
idx = bits.Len8(b) + (8 * (length - i - 1))
break
}
}
- cx.stack[last].Bytes = nil
- cx.stack[last].Uint = uint64(idx)
+ cx.Stack[last].Bytes = nil
+ cx.Stack[last].Uint = uint64(idx)
return nil
}
@@ -1983,17 +1964,17 @@ func opExpImpl(base uint64, exp uint64) (uint64, error) {
}
func opExp(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- exp := cx.stack[last].Uint
- base := cx.stack[prev].Uint
+ exp := cx.Stack[last].Uint
+ base := cx.Stack[prev].Uint
val, err := opExpImpl(base, exp)
if err != nil {
return err
}
- cx.stack[prev].Uint = val
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = val
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -2027,11 +2008,11 @@ func opExpwImpl(base uint64, exp uint64) (*big.Int, error) {
}
func opExpw(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- exp := cx.stack[last].Uint
- base := cx.stack[prev].Uint
+ exp := cx.Stack[last].Uint
+ base := cx.Stack[prev].Uint
val, err := opExpwImpl(base, exp)
if err != nil {
return err
@@ -2039,27 +2020,27 @@ func opExpw(cx *EvalContext) error {
hi := new(big.Int).Rsh(val, 64).Uint64()
lo := val.Uint64()
- cx.stack[prev].Uint = hi
- cx.stack[last].Uint = lo
+ cx.Stack[prev].Uint = hi
+ cx.Stack[last].Uint = lo
return nil
}
func opBytesBinOp(cx *EvalContext, result *big.Int, op func(x, y *big.Int) *big.Int) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- if len(cx.stack[last].Bytes) > maxByteMathSize || len(cx.stack[prev].Bytes) > maxByteMathSize {
+ if len(cx.Stack[last].Bytes) > maxByteMathSize || len(cx.Stack[prev].Bytes) > maxByteMathSize {
return errors.New("math attempted on large byte-array")
}
- rhs := new(big.Int).SetBytes(cx.stack[last].Bytes)
- lhs := new(big.Int).SetBytes(cx.stack[prev].Bytes)
+ rhs := new(big.Int).SetBytes(cx.Stack[last].Bytes)
+ lhs := new(big.Int).SetBytes(cx.Stack[prev].Bytes)
op(lhs, rhs) // op's receiver has already been bound to result
if result.Sign() < 0 {
return errors.New("byte math would have negative result")
}
- cx.stack[prev].Bytes = result.Bytes()
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Bytes = result.Bytes()
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -2096,15 +2077,15 @@ func opBytesMul(cx *EvalContext) error {
}
func opBytesSqrt(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
- if len(cx.stack[last].Bytes) > maxByteMathSize {
+ if len(cx.Stack[last].Bytes) > maxByteMathSize {
return errors.New("math attempted on large byte-array")
}
- val := new(big.Int).SetBytes(cx.stack[last].Bytes)
+ val := new(big.Int).SetBytes(cx.Stack[last].Bytes)
val.Sqrt(val)
- cx.stack[last].Bytes = val.Bytes()
+ cx.Stack[last].Bytes = val.Bytes()
return nil
}
@@ -2118,26 +2099,26 @@ func nonzero(b []byte) []byte {
}
func opBytesLt(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- if len(cx.stack[last].Bytes) > maxByteMathSize || len(cx.stack[prev].Bytes) > maxByteMathSize {
+ if len(cx.Stack[last].Bytes) > maxByteMathSize || len(cx.Stack[prev].Bytes) > maxByteMathSize {
return errors.New("math attempted on large byte-array")
}
- rhs := nonzero(cx.stack[last].Bytes)
- lhs := nonzero(cx.stack[prev].Bytes)
+ rhs := nonzero(cx.Stack[last].Bytes)
+ lhs := nonzero(cx.Stack[prev].Bytes)
switch {
case len(lhs) < len(rhs):
- cx.stack[prev] = boolToSV(true)
+ cx.Stack[prev] = boolToSV(true)
case len(lhs) > len(rhs):
- cx.stack[prev] = boolToSV(false)
+ cx.Stack[prev] = boolToSV(false)
default:
- cx.stack[prev] = boolToSV(bytes.Compare(lhs, rhs) < 0)
+ cx.Stack[prev] = boolToSV(bytes.Compare(lhs, rhs) < 0)
}
- cx.stack = cx.stack[:last]
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -2163,18 +2144,18 @@ func opBytesGe(cx *EvalContext) error {
}
func opBytesEq(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- if len(cx.stack[last].Bytes) > maxByteMathSize || len(cx.stack[prev].Bytes) > maxByteMathSize {
+ if len(cx.Stack[last].Bytes) > maxByteMathSize || len(cx.Stack[prev].Bytes) > maxByteMathSize {
return errors.New("math attempted on large byte-array")
}
- rhs := nonzero(cx.stack[last].Bytes)
- lhs := nonzero(cx.stack[prev].Bytes)
+ rhs := nonzero(cx.Stack[last].Bytes)
+ lhs := nonzero(cx.Stack[prev].Bytes)
- cx.stack[prev] = boolToSV(bytes.Equal(lhs, rhs))
- cx.stack = cx.stack[:last]
+ cx.Stack[prev] = boolToSV(bytes.Equal(lhs, rhs))
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -2215,20 +2196,20 @@ func zpad(smaller []byte, size int) []byte {
// must be newly allocated, and already in place at the top of stack
// (the original top having been popped).
func opBytesBinaryLogicPrep(cx *EvalContext) ([]byte, []byte) {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- llen := len(cx.stack[last].Bytes)
- plen := len(cx.stack[prev].Bytes)
+ llen := len(cx.Stack[last].Bytes)
+ plen := len(cx.Stack[prev].Bytes)
var fresh, other []byte
if llen > plen {
- fresh, other = zpad(cx.stack[prev].Bytes, llen), cx.stack[last].Bytes
+ fresh, other = zpad(cx.Stack[prev].Bytes, llen), cx.Stack[last].Bytes
} else {
- fresh, other = zpad(cx.stack[last].Bytes, plen), cx.stack[prev].Bytes
+ fresh, other = zpad(cx.Stack[last].Bytes, plen), cx.Stack[prev].Bytes
}
- cx.stack[prev].Bytes = fresh
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Bytes = fresh
+ cx.Stack = cx.Stack[:last]
return fresh, other
}
@@ -2257,23 +2238,23 @@ func opBytesBitXor(cx *EvalContext) error {
}
func opBytesBitNot(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
- fresh := make([]byte, len(cx.stack[last].Bytes))
- for i, b := range cx.stack[last].Bytes {
+ fresh := make([]byte, len(cx.Stack[last].Bytes))
+ for i, b := range cx.Stack[last].Bytes {
fresh[i] = ^b
}
- cx.stack[last].Bytes = fresh
+ cx.Stack[last].Bytes = fresh
return nil
}
func opBytesZero(cx *EvalContext) error {
- last := len(cx.stack) - 1
- length := cx.stack[last].Uint
+ last := len(cx.Stack) - 1
+ length := cx.Stack[last].Uint
if length > maxStringSize {
return fmt.Errorf("bzero attempted to create a too large string")
}
- cx.stack[last].Bytes = make([]byte, length)
+ cx.Stack[last].Bytes = make([]byte, length)
return nil
}
@@ -2285,9 +2266,9 @@ func opIntConstBlock(cx *EvalContext) error {
func opIntConstN(cx *EvalContext, n byte) error {
if int(n) >= len(cx.intc) {
- return fmt.Errorf("intc [%d] beyond %d constants", n, len(cx.intc))
+ return fmt.Errorf("intc %d beyond %d constants", n, len(cx.intc))
}
- cx.stack = append(cx.stack, stackValue{Uint: cx.intc[n]})
+ cx.Stack = append(cx.Stack, stackValue{Uint: cx.intc[n]})
return nil
}
func opIntConstLoad(cx *EvalContext) error {
@@ -2314,7 +2295,7 @@ func opPushInt(cx *EvalContext) error {
return fmt.Errorf("could not decode int at program[%d]", pos)
}
sv := stackValue{Uint: val}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
cx.nextpc = pos + bytesUsed
return nil
}
@@ -2324,11 +2305,11 @@ func opPushInts(cx *EvalContext) error {
if err != nil {
return err
}
- finalLen := len(cx.stack) + len(intc)
+ finalLen := len(cx.Stack) + len(intc)
cx.ensureStackCap(finalLen)
for _, cint := range intc {
sv := stackValue{Uint: cint}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
}
cx.nextpc = nextpc
return nil
@@ -2342,9 +2323,9 @@ func opByteConstBlock(cx *EvalContext) error {
func opByteConstN(cx *EvalContext, n uint) error {
if n >= uint(len(cx.bytec)) {
- return fmt.Errorf("bytec [%d] beyond %d constants", n, len(cx.bytec))
+ return fmt.Errorf("bytec %d beyond %d constants", n, len(cx.bytec))
}
- cx.stack = append(cx.stack, stackValue{Bytes: cx.bytec[n]})
+ cx.Stack = append(cx.Stack, stackValue{Bytes: cx.bytec[n]})
return nil
}
func opByteConstLoad(cx *EvalContext) error {
@@ -2376,7 +2357,7 @@ func opPushBytes(cx *EvalContext) error {
return fmt.Errorf("pushbytes too long at program[%d]", pos)
}
sv := stackValue{Bytes: cx.program[pos:end]}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
cx.nextpc = int(end)
return nil
}
@@ -2386,11 +2367,11 @@ func opPushBytess(cx *EvalContext) error {
if err != nil {
return err
}
- finalLen := len(cx.stack) + len(cbytess)
+ finalLen := len(cx.Stack) + len(cbytess)
cx.ensureStackCap(finalLen)
for _, cbytes := range cbytess {
sv := stackValue{Bytes: cbytes}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
}
cx.nextpc = nextpc
return nil
@@ -2401,7 +2382,7 @@ func opArgN(cx *EvalContext, n uint64) error {
return fmt.Errorf("cannot load arg[%d] of %d", n, len(cx.txn.Lsig.Args))
}
val := nilToEmpty(cx.txn.Lsig.Args[n])
- cx.stack = append(cx.stack, stackValue{Bytes: val})
+ cx.Stack = append(cx.Stack, stackValue{Bytes: val})
return nil
}
@@ -2422,10 +2403,10 @@ func opArg3(cx *EvalContext) error {
return opArgN(cx, 3)
}
func opArgs(cx *EvalContext) error {
- last := len(cx.stack) - 1
- n := cx.stack[last].Uint
+ last := len(cx.Stack) - 1
+ n := cx.Stack[last].Uint
// Pop the index and push the result back on the stack.
- cx.stack = cx.stack[:last]
+ cx.Stack = cx.Stack[:last]
return opArgN(cx, n)
}
@@ -2522,10 +2503,10 @@ func checkSwitch(cx *EvalContext) error {
}
func opBnz(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
cx.nextpc = cx.pc + 3
- isNonZero := cx.stack[last].Uint != 0
- cx.stack = cx.stack[:last] // pop
+ isNonZero := cx.Stack[last].Uint != 0
+ cx.Stack = cx.Stack[:last] // pop
if isNonZero {
target, err := branchTarget(cx)
if err != nil {
@@ -2537,10 +2518,10 @@ func opBnz(cx *EvalContext) error {
}
func opBz(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
cx.nextpc = cx.pc + 3
- isZero := cx.stack[last].Uint == 0
- cx.stack = cx.stack[:last] // pop
+ isZero := cx.Stack[last].Uint == 0
+ cx.Stack = cx.Stack[:last] // pop
if isZero {
target, err := branchTarget(cx)
if err != nil {
@@ -2561,10 +2542,10 @@ func opB(cx *EvalContext) error {
}
func opSwitch(cx *EvalContext) error {
- last := len(cx.stack) - 1
- branchIdx := cx.stack[last].Uint
+ last := len(cx.Stack) - 1
+ branchIdx := cx.Stack[last].Uint
- cx.stack = cx.stack[:last]
+ cx.Stack = cx.Stack[:last]
target, err := switchTarget(cx, branchIdx)
if err != nil {
return err
@@ -2576,17 +2557,17 @@ func opSwitch(cx *EvalContext) error {
func opMatch(cx *EvalContext) error {
n := int(cx.program[cx.pc+1])
// stack contains the n sized match list and the single match value
- if n+1 > len(cx.stack) {
- return fmt.Errorf("match expects %d stack args while stack only contains %d", n+1, len(cx.stack))
+ if n+1 > len(cx.Stack) {
+ return fmt.Errorf("match expects %d stack args while stack only contains %d", n+1, len(cx.Stack))
}
- last := len(cx.stack) - 1
- matchVal := cx.stack[last]
- cx.stack = cx.stack[:last]
+ last := len(cx.Stack) - 1
+ matchVal := cx.Stack[last]
+ cx.Stack = cx.Stack[:last]
- argBase := len(cx.stack) - n
- matchList := cx.stack[argBase:]
- cx.stack = cx.stack[:argBase]
+ argBase := len(cx.Stack) - n
+ matchList := cx.Stack[argBase:]
+ cx.Stack = cx.Stack[:argBase]
matchedIdx := n
for i, stackArg := range matchList {
@@ -2616,7 +2597,7 @@ const protoByte = 0x8a
func opCallSub(cx *EvalContext) error {
cx.callstack = append(cx.callstack, frame{
retpc: cx.pc + 3, // retpc is pc _after_ the callsub
- height: len(cx.stack),
+ height: len(cx.Stack),
})
err := opB(cx)
@@ -2638,90 +2619,90 @@ func opRetSub(cx *EvalContext) error {
if top < 0 {
return errors.New("retsub with empty callstack")
}
- frame := cx.callstack[top]
- if frame.clear { // A `proto` was issued in the subroutine, so retsub cleans up.
- expect := frame.height + frame.returns
- if len(cx.stack) < expect { // Check general error case first, only diffentiate when error is assured
+ topFrame := cx.callstack[top]
+ if topFrame.clear { // A `proto` was issued in the subroutine, so retsub cleans up.
+ expect := topFrame.height + topFrame.returns
+ if len(cx.Stack) < expect { // Check general error case first, only diffentiate when error is assured
switch {
- case len(cx.stack) < frame.height:
+ case len(cx.Stack) < topFrame.height:
return fmt.Errorf("retsub executed with stack below frame. Did you pop args?")
- case len(cx.stack) == frame.height:
- return fmt.Errorf("retsub executed with no return values on stack. proto declared %d", frame.returns)
+ case len(cx.Stack) == topFrame.height:
+ return fmt.Errorf("retsub executed with no return values on stack. proto declared %d", topFrame.returns)
default:
return fmt.Errorf("retsub executed with %d return values on stack. proto declared %d",
- len(cx.stack)-frame.height, frame.returns)
+ len(cx.Stack)-topFrame.height, topFrame.returns)
}
}
- argstart := frame.height - frame.args
- copy(cx.stack[argstart:], cx.stack[frame.height:expect])
- cx.stack = cx.stack[:argstart+frame.returns]
+ argstart := topFrame.height - topFrame.args
+ copy(cx.Stack[argstart:], cx.Stack[topFrame.height:expect])
+ cx.Stack = cx.Stack[:argstart+topFrame.returns]
}
cx.callstack = cx.callstack[:top]
- cx.nextpc = frame.retpc
+ cx.nextpc = topFrame.retpc
return nil
}
func opPop(cx *EvalContext) error {
- last := len(cx.stack) - 1
- cx.stack = cx.stack[:last]
+ last := len(cx.Stack) - 1
+ cx.Stack = cx.Stack[:last]
return nil
}
func opDup(cx *EvalContext) error {
- last := len(cx.stack) - 1
- sv := cx.stack[last]
- cx.stack = append(cx.stack, sv)
+ last := len(cx.Stack) - 1
+ sv := cx.Stack[last]
+ cx.Stack = append(cx.Stack, sv)
return nil
}
func opDup2(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- cx.stack = append(cx.stack, cx.stack[prev:]...)
+ cx.Stack = append(cx.Stack, cx.Stack[prev:]...)
return nil
}
func opDig(cx *EvalContext) error {
depth := int(cx.program[cx.pc+1])
- idx := len(cx.stack) - 1 - depth
+ idx := len(cx.Stack) - 1 - depth
// Need to check stack size explicitly here because checkArgs() doesn't understand dig
// so we can't expect our stack to be prechecked.
if idx < 0 {
- return fmt.Errorf("dig %d with stack size = %d", depth, len(cx.stack))
+ return fmt.Errorf("dig %d with stack size = %d", depth, len(cx.Stack))
}
- sv := cx.stack[idx]
- cx.stack = append(cx.stack, sv)
+ sv := cx.Stack[idx]
+ cx.Stack = append(cx.Stack, sv)
return nil
}
func opCover(cx *EvalContext) error {
depth := int(cx.program[cx.pc+1])
- topIdx := len(cx.stack) - 1
+ topIdx := len(cx.Stack) - 1
idx := topIdx - depth
// Need to check stack size explicitly here because checkArgs() doesn't understand cover
// so we can't expect our stack to be prechecked.
if idx < 0 {
- return fmt.Errorf("cover %d with stack size = %d", depth, len(cx.stack))
+ return fmt.Errorf("cover %d with stack size = %d", depth, len(cx.Stack))
}
- sv := cx.stack[topIdx]
- copy(cx.stack[idx+1:], cx.stack[idx:])
- cx.stack[idx] = sv
+ sv := cx.Stack[topIdx]
+ copy(cx.Stack[idx+1:], cx.Stack[idx:])
+ cx.Stack[idx] = sv
return nil
}
func opUncover(cx *EvalContext) error {
depth := int(cx.program[cx.pc+1])
- topIdx := len(cx.stack) - 1
+ topIdx := len(cx.Stack) - 1
idx := topIdx - depth
// Need to check stack size explicitly here because checkArgs() doesn't understand uncover
// so we can't expect our stack to be prechecked.
if idx < 0 {
- return fmt.Errorf("uncover %d with stack size = %d", depth, len(cx.stack))
+ return fmt.Errorf("uncover %d with stack size = %d", depth, len(cx.Stack))
}
- sv := cx.stack[idx]
- copy(cx.stack[idx:], cx.stack[idx+1:])
- cx.stack[topIdx] = sv
+ sv := cx.Stack[idx]
+ copy(cx.Stack[idx:], cx.Stack[idx+1:])
+ cx.Stack[topIdx] = sv
return nil
}
@@ -2816,7 +2797,7 @@ func TxnFieldToTealValue(txn *transactions.Transaction, groupIndex int, field Tx
return basics.TealValue{}, fmt.Errorf("invalid field %s", field)
}
sv, err := cx.txnFieldToStack(stxnad, &fs, arrayFieldIdx, groupIndex, inner)
- return sv.toTealValue(), err
+ return sv.ToTealValue(), err
}
// currentTxID is a convenience method to get the Txid for the txn being evaluated
@@ -2907,7 +2888,7 @@ func (cx *EvalContext) getTxID(txn *transactions.Transaction, groupIndex int, in
func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *txnFieldSpec, arrayFieldIdx uint64, groupIndex int, inner bool) (sv stackValue, err error) {
if fs.effects {
- if cx.runModeFlags == ModeSig {
+ if cx.runMode == ModeSig {
return sv, fmt.Errorf("txn[%s] not allowed in current mode", fs.field)
}
if cx.version < txnEffectsVersion && !inner {
@@ -3175,7 +3156,7 @@ func (cx *EvalContext) opTxnImpl(gi uint64, src txnSource, field TxnField, ai ui
case srcGroup:
if fs.effects && gi >= uint64(cx.groupIndex) {
// Test mode so that error is clearer
- if cx.runModeFlags == ModeSig {
+ if cx.runMode == ModeSig {
return sv, fmt.Errorf("txn[%s] not allowed in current mode", fs.field)
}
return sv, fmt.Errorf("txn effects can only be read from past txns %d %d", gi, cx.groupIndex)
@@ -3211,7 +3192,7 @@ func opTxn(cx *EvalContext) error {
return err
}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
return nil
}
@@ -3225,23 +3206,23 @@ func opTxna(cx *EvalContext) error {
return err
}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
return nil
}
func opTxnas(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
gi := uint64(cx.groupIndex)
field := TxnField(cx.program[cx.pc+1])
- ai := cx.stack[last].Uint
+ ai := cx.Stack[last].Uint
sv, err := cx.opTxnImpl(gi, srcGroup, field, ai, true)
if err != nil {
return err
}
- cx.stack[last] = sv
+ cx.Stack[last] = sv
return nil
}
@@ -3254,7 +3235,7 @@ func opGtxn(cx *EvalContext) error {
return err
}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
return nil
}
@@ -3268,30 +3249,30 @@ func opGtxna(cx *EvalContext) error {
return err
}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
return nil
}
func opGtxnas(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
gi := uint64(cx.program[cx.pc+1])
field := TxnField(cx.program[cx.pc+2])
- ai := cx.stack[last].Uint
+ ai := cx.Stack[last].Uint
sv, err := cx.opTxnImpl(gi, srcGroup, field, ai, true)
if err != nil {
return err
}
- cx.stack[last] = sv
+ cx.Stack[last] = sv
return nil
}
func opGtxns(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
- gi := cx.stack[last].Uint
+ gi := cx.Stack[last].Uint
field := TxnField(cx.program[cx.pc+1])
sv, err := cx.opTxnImpl(gi, srcGroup, field, 0, false)
@@ -3299,14 +3280,14 @@ func opGtxns(cx *EvalContext) error {
return err
}
- cx.stack[last] = sv
+ cx.Stack[last] = sv
return nil
}
func opGtxnsa(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
- gi := cx.stack[last].Uint
+ gi := cx.Stack[last].Uint
field := TxnField(cx.program[cx.pc+1])
ai := uint64(cx.program[cx.pc+2])
@@ -3315,25 +3296,25 @@ func opGtxnsa(cx *EvalContext) error {
return err
}
- cx.stack[last] = sv
+ cx.Stack[last] = sv
return nil
}
func opGtxnsas(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- gi := cx.stack[prev].Uint
+ gi := cx.Stack[prev].Uint
field := TxnField(cx.program[cx.pc+1])
- ai := cx.stack[last].Uint
+ ai := cx.Stack[last].Uint
sv, err := cx.opTxnImpl(gi, srcGroup, field, ai, true)
if err != nil {
return err
}
- cx.stack[prev] = sv
- cx.stack = cx.stack[:last]
+ cx.Stack[prev] = sv
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -3344,7 +3325,7 @@ func opItxn(cx *EvalContext) error {
if err != nil {
return err
}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
return nil
}
@@ -3357,22 +3338,22 @@ func opItxna(cx *EvalContext) error {
return err
}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
return nil
}
func opItxnas(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
field := TxnField(cx.program[cx.pc+1])
- ai := cx.stack[last].Uint
+ ai := cx.Stack[last].Uint
sv, err := cx.opTxnImpl(0, srcInner, field, ai, true)
if err != nil {
return err
}
- cx.stack[last] = sv
+ cx.Stack[last] = sv
return nil
}
@@ -3415,7 +3396,7 @@ func opGitxn(cx *EvalContext) error {
return err
}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
return nil
}
@@ -3429,23 +3410,23 @@ func opGitxna(cx *EvalContext) error {
return err
}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
return nil
}
func opGitxnas(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
gi := uint64(cx.program[cx.pc+1])
field := TxnField(cx.program[cx.pc+2])
- ai := cx.stack[last].Uint
+ ai := cx.Stack[last].Uint
sv, err := cx.opTxnImpl(gi, srcInnerGroup, field, ai, true)
if err != nil {
return err
}
- cx.stack[last] = sv
+ cx.Stack[last] = sv
return nil
}
@@ -3481,20 +3462,20 @@ func opGaid(cx *EvalContext) error {
return err
}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
return nil
}
func opGaids(cx *EvalContext) error {
- last := len(cx.stack) - 1
- gi := cx.stack[last].Uint
+ last := len(cx.Stack) - 1
+ gi := cx.Stack[last].Uint
sv, err := opGaidImpl(cx, gi, "gaids")
if err != nil {
return err
}
- cx.stack[last] = sv
+ cx.Stack[last] = sv
return nil
}
@@ -3593,7 +3574,7 @@ func opGlobal(cx *EvalContext) error {
if !ok || fs.version > cx.version {
return fmt.Errorf("invalid global field %s", globalField)
}
- if (cx.runModeFlags & fs.mode) == 0 {
+ if (cx.runMode & fs.mode) == 0 {
return fmt.Errorf("global[%s] not allowed in current mode", globalField)
}
@@ -3602,7 +3583,7 @@ func opGlobal(cx *EvalContext) error {
return err
}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
return nil
}
@@ -3628,47 +3609,47 @@ func (cx *EvalContext) programHash() crypto.Digest {
}
func opEd25519Verify(cx *EvalContext) error {
- last := len(cx.stack) - 1 // index of PK
+ last := len(cx.Stack) - 1 // index of PK
prev := last - 1 // index of signature
pprev := prev - 1 // index of data
var sv crypto.SignatureVerifier
- if len(cx.stack[last].Bytes) != len(sv) {
+ if len(cx.Stack[last].Bytes) != len(sv) {
return errors.New("invalid public key")
}
- copy(sv[:], cx.stack[last].Bytes)
+ copy(sv[:], cx.Stack[last].Bytes)
var sig crypto.Signature
- if len(cx.stack[prev].Bytes) != len(sig) {
+ if len(cx.Stack[prev].Bytes) != len(sig) {
return errors.New("invalid signature")
}
- copy(sig[:], cx.stack[prev].Bytes)
+ copy(sig[:], cx.Stack[prev].Bytes)
- msg := Msg{ProgramHash: cx.programHash(), Data: cx.stack[pprev].Bytes}
- cx.stack[pprev] = boolToSV(sv.Verify(msg, sig))
- cx.stack = cx.stack[:prev]
+ msg := Msg{ProgramHash: cx.programHash(), Data: cx.Stack[pprev].Bytes}
+ cx.Stack[pprev] = boolToSV(sv.Verify(msg, sig))
+ cx.Stack = cx.Stack[:prev]
return nil
}
func opEd25519VerifyBare(cx *EvalContext) error {
- last := len(cx.stack) - 1 // index of PK
+ last := len(cx.Stack) - 1 // index of PK
prev := last - 1 // index of signature
pprev := prev - 1 // index of data
var sv crypto.SignatureVerifier
- if len(cx.stack[last].Bytes) != len(sv) {
+ if len(cx.Stack[last].Bytes) != len(sv) {
return errors.New("invalid public key")
}
- copy(sv[:], cx.stack[last].Bytes)
+ copy(sv[:], cx.Stack[last].Bytes)
var sig crypto.Signature
- if len(cx.stack[prev].Bytes) != len(sig) {
+ if len(cx.Stack[prev].Bytes) != len(sig) {
return errors.New("invalid signature")
}
- copy(sig[:], cx.stack[prev].Bytes)
+ copy(sig[:], cx.Stack[prev].Bytes)
- cx.stack[pprev] = boolToSV(sv.VerifyBytes(cx.stack[pprev].Bytes, sig))
- cx.stack = cx.stack[:prev]
+ cx.Stack[pprev] = boolToSV(sv.VerifyBytes(cx.Stack[pprev].Bytes, sig))
+ cx.Stack = cx.Stack[:prev]
return nil
}
@@ -3700,17 +3681,17 @@ func opEcdsaVerify(cx *EvalContext) error {
return fmt.Errorf("unsupported curve %d", fs.field)
}
- last := len(cx.stack) - 1 // index of PK y
+ last := len(cx.Stack) - 1 // index of PK y
prev := last - 1 // index of PK x
pprev := prev - 1 // index of signature s
fourth := pprev - 1 // index of signature r
fifth := fourth - 1 // index of data
- pkY := cx.stack[last].Bytes
- pkX := cx.stack[prev].Bytes
- sigS := cx.stack[pprev].Bytes
- sigR := cx.stack[fourth].Bytes
- msg := cx.stack[fifth].Bytes
+ pkY := cx.Stack[last].Bytes
+ pkX := cx.Stack[prev].Bytes
+ sigS := cx.Stack[pprev].Bytes
+ sigR := cx.Stack[fourth].Bytes
+ msg := cx.Stack[fifth].Bytes
if len(msg) != 32 {
return fmt.Errorf("the signed data must be 32 bytes long, not %d", len(msg))
@@ -3740,8 +3721,8 @@ func opEcdsaVerify(cx *EvalContext) error {
}
}
- cx.stack[fifth] = boolToSV(result)
- cx.stack = cx.stack[:fourth]
+ cx.Stack[fifth] = boolToSV(result)
+ cx.Stack = cx.Stack[:fourth]
return nil
}
@@ -3761,9 +3742,9 @@ func opEcdsaPkDecompress(cx *EvalContext) error {
return fmt.Errorf("unsupported curve %d", fs.field)
}
- last := len(cx.stack) - 1 // compressed PK
+ last := len(cx.Stack) - 1 // compressed PK
- pubkey := cx.stack[last].Bytes
+ pubkey := cx.Stack[last].Bytes
var x, y *big.Int
if fs.field == Secp256k1 {
x, y = secp256k1.DecompressPubkey(pubkey)
@@ -3778,19 +3759,19 @@ func opEcdsaPkDecompress(cx *EvalContext) error {
}
var err error
- cx.stack[last].Uint = 0
- cx.stack[last].Bytes, err = leadingZeros(32, x)
+ cx.Stack[last].Uint = 0
+ cx.Stack[last].Bytes, err = leadingZeros(32, x)
if err != nil {
- return fmt.Errorf("x component zeroing failed: %s", err.Error())
+ return fmt.Errorf("x component zeroing failed: %w", err)
}
var sv stackValue
sv.Bytes, err = leadingZeros(32, y)
if err != nil {
- return fmt.Errorf("y component zeroing failed: %s", err.Error())
+ return fmt.Errorf("y component zeroing failed: %w", err)
}
- cx.stack = append(cx.stack, sv)
+ cx.Stack = append(cx.Stack, sv)
return nil
}
@@ -3805,15 +3786,15 @@ func opEcdsaPkRecover(cx *EvalContext) error {
return fmt.Errorf("unsupported curve %d", fs.field)
}
- last := len(cx.stack) - 1 // index of signature s
+ last := len(cx.Stack) - 1 // index of signature s
prev := last - 1 // index of signature r
pprev := prev - 1 // index of recovery id
fourth := pprev - 1 // index of data
- sigS := cx.stack[last].Bytes
- sigR := cx.stack[prev].Bytes
- recid := cx.stack[pprev].Uint
- msg := cx.stack[fourth].Bytes
+ sigS := cx.Stack[last].Bytes
+ sigR := cx.Stack[prev].Bytes
+ recid := cx.Stack[pprev].Uint
+ msg := cx.Stack[fourth].Bytes
if recid > 3 {
return fmt.Errorf("invalid recovery id: %d", recid)
@@ -3833,53 +3814,53 @@ func opEcdsaPkRecover(cx *EvalContext) error {
return fmt.Errorf("pubkey unmarshal failed")
}
- cx.stack[fourth].Uint = 0
- cx.stack[fourth].Bytes, err = leadingZeros(32, x)
+ cx.Stack[fourth].Uint = 0
+ cx.Stack[fourth].Bytes, err = leadingZeros(32, x)
if err != nil {
return fmt.Errorf("x component zeroing failed: %s", err.Error())
}
- cx.stack[pprev].Uint = 0
- cx.stack[pprev].Bytes, err = leadingZeros(32, y)
+ cx.Stack[pprev].Uint = 0
+ cx.Stack[pprev].Bytes, err = leadingZeros(32, y)
if err != nil {
return fmt.Errorf("y component zeroing failed: %s", err.Error())
}
- cx.stack = cx.stack[:prev]
+ cx.Stack = cx.Stack[:prev]
return nil
}
func opLoad(cx *EvalContext) error {
n := cx.program[cx.pc+1]
- cx.stack = append(cx.stack, cx.scratch[n])
+ cx.Stack = append(cx.Stack, cx.Scratch[n])
return nil
}
func opLoads(cx *EvalContext) error {
- last := len(cx.stack) - 1
- n := cx.stack[last].Uint
- if n >= uint64(len(cx.scratch)) {
+ last := len(cx.Stack) - 1
+ n := cx.Stack[last].Uint
+ if n >= uint64(len(cx.Scratch)) {
return fmt.Errorf("invalid Scratch index %d", n)
}
- cx.stack[last] = cx.scratch[n]
+ cx.Stack[last] = cx.Scratch[n]
return nil
}
func opStore(cx *EvalContext) error {
n := cx.program[cx.pc+1]
- last := len(cx.stack) - 1
- cx.scratch[n] = cx.stack[last]
- cx.stack = cx.stack[:last]
+ last := len(cx.Stack) - 1
+ cx.Scratch[n] = cx.Stack[last]
+ cx.Stack = cx.Stack[:last]
return nil
}
func opStores(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- n := cx.stack[prev].Uint
- if n >= uint64(len(cx.scratch)) {
+ n := cx.Stack[prev].Uint
+ if n >= uint64(len(cx.Scratch)) {
return fmt.Errorf("invalid Scratch index %d", n)
}
- cx.scratch[n] = cx.stack[last]
- cx.stack = cx.stack[:prev]
+ cx.Scratch[n] = cx.Stack[last]
+ cx.Stack = cx.Stack[:prev]
return nil
}
@@ -3888,7 +3869,7 @@ func opGloadImpl(cx *EvalContext, gi int, scratchIdx byte, opName string) (stack
if gi >= len(cx.TxnGroup) {
return none, fmt.Errorf("%s lookup TxnGroup[%d] but it only has %d", opName, gi, len(cx.TxnGroup))
}
- if int(scratchIdx) >= len(cx.scratch) {
+ if int(scratchIdx) >= len(cx.Scratch) {
return none, fmt.Errorf("invalid Scratch index %d", scratchIdx)
}
if cx.TxnGroup[gi].Txn.Type != protocol.ApplicationCallTx {
@@ -3912,13 +3893,13 @@ func opGload(cx *EvalContext) error {
return err
}
- cx.stack = append(cx.stack, scratchValue)
+ cx.Stack = append(cx.Stack, scratchValue)
return nil
}
func opGloads(cx *EvalContext) error {
- last := len(cx.stack) - 1
- gi := cx.stack[last].Uint
+ last := len(cx.Stack) - 1
+ gi := cx.Stack[last].Uint
if gi >= uint64(len(cx.TxnGroup)) {
return fmt.Errorf("gloads lookup TxnGroup[%d] but it only has %d", gi, len(cx.TxnGroup))
}
@@ -3928,19 +3909,19 @@ func opGloads(cx *EvalContext) error {
return err
}
- cx.stack[last] = scratchValue
+ cx.Stack[last] = scratchValue
return nil
}
func opGloadss(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- gi := cx.stack[prev].Uint
+ gi := cx.Stack[prev].Uint
if gi >= uint64(len(cx.TxnGroup)) {
return fmt.Errorf("gloadss lookup TxnGroup[%d] but it only has %d", gi, len(cx.TxnGroup))
}
- scratchIdx := cx.stack[last].Uint
+ scratchIdx := cx.Stack[last].Uint
if scratchIdx >= 256 {
return fmt.Errorf("gloadss scratch index >= 256 (%d)", scratchIdx)
}
@@ -3949,22 +3930,22 @@ func opGloadss(cx *EvalContext) error {
return err
}
- cx.stack[prev] = scratchValue
- cx.stack = cx.stack[:last]
+ cx.Stack[prev] = scratchValue
+ cx.Stack = cx.Stack[:last]
return nil
}
func opConcat(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- a := cx.stack[prev].Bytes
- b := cx.stack[last].Bytes
+ a := cx.Stack[prev].Bytes
+ b := cx.Stack[last].Bytes
newlen := len(a) + len(b)
newvalue := make([]byte, newlen)
copy(newvalue, a)
copy(newvalue[len(a):], b)
- cx.stack[prev].Bytes = newvalue
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Bytes = newvalue
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -3979,34 +3960,34 @@ func substring(x []byte, start, end int) ([]byte, error) {
}
func opSubstring(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
start := cx.program[cx.pc+1]
end := cx.program[cx.pc+2]
- bytes, err := substring(cx.stack[last].Bytes, int(start), int(end))
- cx.stack[last].Bytes = bytes
+ bytes, err := substring(cx.Stack[last].Bytes, int(start), int(end))
+ cx.Stack[last].Bytes = bytes
return err
}
func opSubstring3(cx *EvalContext) error {
- last := len(cx.stack) - 1 // end
+ last := len(cx.Stack) - 1 // end
prev := last - 1 // start
pprev := prev - 1 // bytes
- start := cx.stack[prev].Uint
- end := cx.stack[last].Uint
+ start := cx.Stack[prev].Uint
+ end := cx.Stack[last].Uint
if start > math.MaxInt32 || end > math.MaxInt32 {
return errors.New("substring range beyond length of string")
}
- bytes, err := substring(cx.stack[pprev].Bytes, int(start), int(end))
- cx.stack[pprev].Bytes = bytes
- cx.stack = cx.stack[:prev]
+ bytes, err := substring(cx.Stack[pprev].Bytes, int(start), int(end))
+ cx.Stack[pprev].Bytes = bytes
+ cx.Stack = cx.Stack[:prev]
return err
}
func opGetBit(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- idx := cx.stack[last].Uint
- target := cx.stack[prev]
+ idx := cx.Stack[last].Uint
+ target := cx.Stack[prev]
var bit uint64
if target.avmType() == avmUint64 {
@@ -4032,20 +4013,20 @@ func opGetBit(cx *EvalContext) error {
mask := byte(0x80) >> bitIdx
bit = uint64((byteVal & mask) >> (7 - bitIdx))
}
- cx.stack[prev].Uint = bit
- cx.stack[prev].Bytes = nil
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = bit
+ cx.Stack[prev].Bytes = nil
+ cx.Stack = cx.Stack[:last]
return nil
}
func opSetBit(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
pprev := prev - 1
- bit := cx.stack[last].Uint
- idx := cx.stack[prev].Uint
- target := cx.stack[pprev]
+ bit := cx.Stack[last].Uint
+ idx := cx.Stack[prev].Uint
+ target := cx.Stack[pprev]
if bit > 1 {
return errors.New("setbit value > 1")
@@ -4057,9 +4038,9 @@ func opSetBit(cx *EvalContext) error {
}
mask := uint64(1) << idx
if bit == uint64(1) {
- cx.stack[pprev].Uint |= mask // manipulate stack in place
+ cx.Stack[pprev].Uint |= mask // manipulate stack in place
} else {
- cx.stack[pprev].Uint &^= mask // manipulate stack in place
+ cx.Stack[pprev].Uint &^= mask // manipulate stack in place
}
} else {
// indexing into a byteslice
@@ -4082,42 +4063,42 @@ func opSetBit(cx *EvalContext) error {
} else {
scratch[byteIdx] &^= mask
}
- cx.stack[pprev].Bytes = scratch
+ cx.Stack[pprev].Bytes = scratch
}
- cx.stack = cx.stack[:prev]
+ cx.Stack = cx.Stack[:prev]
return nil
}
func opGetByte(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- idx := cx.stack[last].Uint
- target := cx.stack[prev]
+ idx := cx.Stack[last].Uint
+ target := cx.Stack[prev]
if idx >= uint64(len(target.Bytes)) {
return errors.New("getbyte index beyond array length")
}
- cx.stack[prev].Uint = uint64(target.Bytes[idx])
- cx.stack[prev].Bytes = nil
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = uint64(target.Bytes[idx])
+ cx.Stack[prev].Bytes = nil
+ cx.Stack = cx.Stack[:last]
return nil
}
func opSetByte(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
pprev := prev - 1
- if cx.stack[last].Uint > 255 {
+ if cx.Stack[last].Uint > 255 {
return errors.New("setbyte value > 255")
}
- if cx.stack[prev].Uint >= uint64(len(cx.stack[pprev].Bytes)) {
+ if cx.Stack[prev].Uint >= uint64(len(cx.Stack[pprev].Bytes)) {
return errors.New("setbyte index beyond array length")
}
// Copy to avoid modifying shared slice
- cx.stack[pprev].Bytes = append([]byte(nil), cx.stack[pprev].Bytes...)
- cx.stack[pprev].Bytes[cx.stack[prev].Uint] = byte(cx.stack[last].Uint)
- cx.stack = cx.stack[:prev]
+ cx.Stack[pprev].Bytes = append([]byte(nil), cx.Stack[pprev].Bytes...)
+ cx.Stack[pprev].Bytes[cx.Stack[prev].Uint] = byte(cx.Stack[last].Uint)
+ cx.Stack = cx.Stack[:prev]
return nil
}
@@ -4136,29 +4117,29 @@ func extractCarefully(x []byte, start, length uint64) ([]byte, error) {
}
func opExtract(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
start := uint64(cx.program[cx.pc+1])
length := uint64(cx.program[cx.pc+2])
// Shortcut: if length is 0, take bytes from start index to the end
if length == 0 {
// If length has wrapped, it's because start > len(), so extractCarefully will report
- length = uint64(len(cx.stack[last].Bytes) - int(start))
+ length = uint64(len(cx.Stack[last].Bytes) - int(start))
}
- bytes, err := extractCarefully(cx.stack[last].Bytes, start, length)
- cx.stack[last].Bytes = bytes
+ bytes, err := extractCarefully(cx.Stack[last].Bytes, start, length)
+ cx.Stack[last].Bytes = bytes
return err
}
func opExtract3(cx *EvalContext) error {
- last := len(cx.stack) - 1 // length
+ last := len(cx.Stack) - 1 // length
prev := last - 1 // start
pprev := prev - 1 // bytes
- start := cx.stack[prev].Uint
- length := cx.stack[last].Uint
- bytes, err := extractCarefully(cx.stack[pprev].Bytes, start, length)
- cx.stack[pprev].Bytes = bytes
- cx.stack = cx.stack[:prev]
+ start := cx.Stack[prev].Uint
+ length := cx.Stack[last].Uint
+ bytes, err := extractCarefully(cx.Stack[pprev].Bytes, start, length)
+ cx.Stack[pprev].Bytes = bytes
+ cx.Stack = cx.Stack[:prev]
return err
}
@@ -4187,37 +4168,37 @@ func replaceCarefully(original []byte, replacement []byte, start uint64) ([]byte
}
func opReplace2(cx *EvalContext) error {
- last := len(cx.stack) - 1 // replacement
+ last := len(cx.Stack) - 1 // replacement
prev := last - 1 // original
- replacement := cx.stack[last].Bytes
+ replacement := cx.Stack[last].Bytes
start := uint64(cx.program[cx.pc+1])
- original := cx.stack[prev].Bytes
+ original := cx.Stack[prev].Bytes
bytes, err := replaceCarefully(original, replacement, start)
if err != nil {
return err
}
- cx.stack[prev].Bytes = bytes
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Bytes = bytes
+ cx.Stack = cx.Stack[:last]
return err
}
func opReplace3(cx *EvalContext) error {
- last := len(cx.stack) - 1 // replacement
+ last := len(cx.Stack) - 1 // replacement
prev := last - 1 // start
pprev := prev - 1 // original
- replacement := cx.stack[last].Bytes
- start := cx.stack[prev].Uint
- original := cx.stack[pprev].Bytes
+ replacement := cx.Stack[last].Bytes
+ start := cx.Stack[prev].Uint
+ original := cx.Stack[pprev].Bytes
bytes, err := replaceCarefully(original, replacement, start)
if err != nil {
return err
}
- cx.stack[pprev].Bytes = bytes
- cx.stack = cx.stack[:prev]
+ cx.Stack[pprev].Bytes = bytes
+ cx.Stack = cx.Stack[:prev]
return err
}
@@ -4233,16 +4214,16 @@ func convertBytesToInt(x []byte) uint64 {
}
func opExtractNBytes(cx *EvalContext, n uint64) error {
- last := len(cx.stack) - 1 // start
+ last := len(cx.Stack) - 1 // start
prev := last - 1 // bytes
- start := cx.stack[last].Uint
- bytes, err := extractCarefully(cx.stack[prev].Bytes, start, n) // extract n bytes
+ start := cx.Stack[last].Uint
+ bytes, err := extractCarefully(cx.Stack[prev].Bytes, start, n) // extract n bytes
if err != nil {
return err
}
- cx.stack[prev].Uint = convertBytesToInt(bytes)
- cx.stack[prev].Bytes = nil
- cx.stack = cx.stack[:last]
+ cx.Stack[prev].Uint = convertBytesToInt(bytes)
+ cx.Stack[prev].Bytes = nil
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -4269,7 +4250,7 @@ func (cx *EvalContext) assignAccount(sv stackValue) (basics.Address, error) {
if cx.availableAccount(addr) {
return addr, nil
}
- return basics.Address{}, fmt.Errorf("invalid Account reference %s", addr)
+ return basics.Address{}, fmt.Errorf("unavailable Account %s", addr)
}
// accountReference yields the address and Accounts offset designated by a
@@ -4391,9 +4372,9 @@ func (cx *EvalContext) mutableAccountReference(account stackValue) (basics.Addre
}
func opBalance(cx *EvalContext) error {
- last := len(cx.stack) - 1 // account (index or actual address)
+ last := len(cx.Stack) - 1 // account (index or actual address)
- addr, _, err := cx.accountReference(cx.stack[last])
+ addr, _, err := cx.accountReference(cx.Stack[last])
if err != nil {
return err
}
@@ -4403,15 +4384,15 @@ func opBalance(cx *EvalContext) error {
return err
}
- cx.stack[last].Bytes = nil
- cx.stack[last].Uint = account.MicroAlgos.Raw
+ cx.Stack[last].Bytes = nil
+ cx.Stack[last].Uint = account.MicroAlgos.Raw
return nil
}
func opMinBalance(cx *EvalContext) error {
- last := len(cx.stack) - 1 // account (index or actual address)
+ last := len(cx.Stack) - 1 // account (index or actual address)
- addr, _, err := cx.accountReference(cx.stack[last])
+ addr, _, err := cx.accountReference(cx.Stack[last])
if err != nil {
return err
}
@@ -4421,16 +4402,16 @@ func opMinBalance(cx *EvalContext) error {
return err
}
- cx.stack[last].Bytes = nil
- cx.stack[last].Uint = account.MinBalance(cx.Proto).Raw
+ cx.Stack[last].Bytes = nil
+ cx.Stack[last].Uint = account.MinBalance(cx.Proto).Raw
return nil
}
func opAppOptedIn(cx *EvalContext) error {
- last := len(cx.stack) - 1 // app
+ last := len(cx.Stack) - 1 // app
prev := last - 1 // account
- addr, app, _, err := cx.localsReference(cx.stack[prev], cx.stack[last].Uint)
+ addr, app, _, err := cx.localsReference(cx.Stack[prev], cx.Stack[last].Uint)
if err != nil {
return err
}
@@ -4440,43 +4421,43 @@ func opAppOptedIn(cx *EvalContext) error {
return err
}
- cx.stack[prev] = boolToSV(optedIn)
- cx.stack = cx.stack[:last]
+ cx.Stack[prev] = boolToSV(optedIn)
+ cx.Stack = cx.Stack[:last]
return nil
}
func opAppLocalGet(cx *EvalContext) error {
- last := len(cx.stack) - 1 // state key
+ last := len(cx.Stack) - 1 // state key
prev := last - 1 // account
- key := cx.stack[last].Bytes
+ key := cx.Stack[last].Bytes
- result, _, err := opAppLocalGetImpl(cx, 0, key, cx.stack[prev])
+ result, _, err := opAppLocalGetImpl(cx, 0, key, cx.Stack[prev])
if err != nil {
return err
}
- cx.stack[prev] = result
- cx.stack = cx.stack[:last]
+ cx.Stack[prev] = result
+ cx.Stack = cx.Stack[:last]
return nil
}
func opAppLocalGetEx(cx *EvalContext) error {
- last := len(cx.stack) - 1 // state key
+ last := len(cx.Stack) - 1 // state key
prev := last - 1 // app id
pprev := prev - 1 // account
- key := cx.stack[last].Bytes
- appID := cx.stack[prev].Uint
+ key := cx.Stack[last].Bytes
+ appID := cx.Stack[prev].Uint
- result, ok, err := opAppLocalGetImpl(cx, appID, key, cx.stack[pprev])
+ result, ok, err := opAppLocalGetImpl(cx, appID, key, cx.Stack[pprev])
if err != nil {
return err
}
- cx.stack[pprev] = result
- cx.stack[prev] = boolToSV(ok)
- cx.stack = cx.stack[:last]
+ cx.Stack[pprev] = result
+ cx.Stack[prev] = boolToSV(ok)
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -4515,32 +4496,32 @@ func opAppGetGlobalStateImpl(cx *EvalContext, appIndex uint64, key []byte) (resu
}
func opAppGlobalGet(cx *EvalContext) error {
- last := len(cx.stack) - 1 // state key
+ last := len(cx.Stack) - 1 // state key
- key := cx.stack[last].Bytes
+ key := cx.Stack[last].Bytes
result, _, err := opAppGetGlobalStateImpl(cx, 0, key)
if err != nil {
return err
}
- cx.stack[last] = result
+ cx.Stack[last] = result
return nil
}
func opAppGlobalGetEx(cx *EvalContext) error {
- last := len(cx.stack) - 1 // state key
+ last := len(cx.Stack) - 1 // state key
prev := last - 1 // app
- key := cx.stack[last].Bytes
+ key := cx.Stack[last].Bytes
- result, ok, err := opAppGetGlobalStateImpl(cx, cx.stack[prev].Uint, key)
+ result, ok, err := opAppGetGlobalStateImpl(cx, cx.Stack[prev].Uint, key)
if err != nil {
return err
}
- cx.stack[prev] = result
- cx.stack[last] = boolToSV(ok)
+ cx.Stack[prev] = result
+ cx.Stack[last] = boolToSV(ok)
return nil
}
@@ -4571,12 +4552,12 @@ func (cx *EvalContext) ensureLocalDelta(accountIdx uint64, addr basics.Address)
}
func opAppLocalPut(cx *EvalContext) error {
- last := len(cx.stack) - 1 // value
+ last := len(cx.Stack) - 1 // value
prev := last - 1 // state key
pprev := prev - 1 // account
- sv := cx.stack[last]
- key := string(cx.stack[prev].Bytes)
+ sv := cx.Stack[last]
+ key := string(cx.Stack[prev].Bytes)
// Enforce key lengths. Now, this is the same as enforced by ledger, but if
// it ever to change in proto, we would need to isolate changes to different
@@ -4585,7 +4566,7 @@ func opAppLocalPut(cx *EvalContext) error {
return fmt.Errorf("key too long: length was %d, maximum is %d", len(key), cx.Proto.MaxAppKeyLen)
}
- addr, accountIdx, err := cx.mutableAccountReference(cx.stack[pprev])
+ addr, accountIdx, err := cx.mutableAccountReference(cx.Stack[pprev])
if err != nil {
return err
}
@@ -4604,7 +4585,7 @@ func opAppLocalPut(cx *EvalContext) error {
return err
}
- tv := sv.toTealValue()
+ tv := sv.ToTealValue()
if !ok || tv != etv {
accountIdx = cx.ensureLocalDelta(accountIdx, addr)
cx.txn.EvalDelta.LocalDeltas[accountIdx][key] = tv.ToValueDelta()
@@ -4625,16 +4606,16 @@ func opAppLocalPut(cx *EvalContext) error {
return err
}
- cx.stack = cx.stack[:pprev]
+ cx.Stack = cx.Stack[:pprev]
return nil
}
func opAppGlobalPut(cx *EvalContext) error {
- last := len(cx.stack) - 1 // value
+ last := len(cx.Stack) - 1 // value
prev := last - 1 // state key
- sv := cx.stack[last]
- key := string(cx.stack[prev].Bytes)
+ sv := cx.Stack[last]
+ key := string(cx.Stack[prev].Bytes)
// Enforce maximum key length. Currently this is the same as enforced by
// ledger. If it were ever to change in proto, we would need to isolate
@@ -4650,7 +4631,7 @@ func opAppGlobalPut(cx *EvalContext) error {
if err != nil {
return err
}
- tv := sv.toTealValue()
+ tv := sv.ToTealValue()
if !ok || tv != etv {
cx.txn.EvalDelta.GlobalDelta[key] = tv.ToValueDelta()
}
@@ -4670,17 +4651,17 @@ func opAppGlobalPut(cx *EvalContext) error {
return err
}
- cx.stack = cx.stack[:prev]
+ cx.Stack = cx.Stack[:prev]
return nil
}
func opAppLocalDel(cx *EvalContext) error {
- last := len(cx.stack) - 1 // key
+ last := len(cx.Stack) - 1 // key
prev := last - 1 // account
- key := string(cx.stack[last].Bytes)
+ key := string(cx.Stack[last].Bytes)
- addr, accountIdx, err := cx.mutableAccountReference(cx.stack[prev])
+ addr, accountIdx, err := cx.mutableAccountReference(cx.Stack[prev])
if err != nil {
return err
}
@@ -4694,9 +4675,9 @@ func opAppLocalDel(cx *EvalContext) error {
// if deleting a non-existent value, don't record in EvalDelta, matching
// ledger behavior with previous BuildEvalDelta mechanism
- if _, ok, err := cx.Ledger.GetLocal(addr, cx.appID, key, accountIdx); ok {
- if err != nil {
- return err
+ if _, ok, getErr := cx.Ledger.GetLocal(addr, cx.appID, key, accountIdx); ok {
+ if getErr != nil {
+ return getErr
}
accountIdx = cx.ensureLocalDelta(accountIdx, addr)
cx.txn.EvalDelta.LocalDeltas[accountIdx][key] = basics.ValueDelta{
@@ -4709,14 +4690,14 @@ func opAppLocalDel(cx *EvalContext) error {
return err
}
- cx.stack = cx.stack[:prev]
+ cx.Stack = cx.Stack[:prev]
return nil
}
func opAppGlobalDel(cx *EvalContext) error {
- last := len(cx.stack) - 1 // key
+ last := len(cx.Stack) - 1 // key
- key := string(cx.stack[last].Bytes)
+ key := string(cx.Stack[last].Bytes)
// if deleting a non-existent value, don't record in EvalDelta, matching
// ledger behavior with previous BuildEvalDelta mechanism
@@ -4733,7 +4714,7 @@ func opAppGlobalDel(cx *EvalContext) error {
if err != nil {
return err
}
- cx.stack = cx.stack[:last]
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -4953,7 +4934,7 @@ func (cx *EvalContext) holdingReference(account stackValue, ref uint64) (basics.
}
func opAssetHoldingGet(cx *EvalContext) error {
- last := len(cx.stack) - 1 // asset
+ last := len(cx.Stack) - 1 // asset
prev := last - 1 // account
holdingField := AssetHoldingField(cx.program[cx.pc+1])
@@ -4962,7 +4943,7 @@ func opAssetHoldingGet(cx *EvalContext) error {
return fmt.Errorf("invalid asset_holding_get field %d", holdingField)
}
- addr, asset, err := cx.holdingReference(cx.stack[prev], cx.stack[last].Uint)
+ addr, asset, err := cx.holdingReference(cx.Stack[prev], cx.Stack[last].Uint)
if err != nil {
return err
}
@@ -4978,13 +4959,13 @@ func opAssetHoldingGet(cx *EvalContext) error {
}
}
- cx.stack[prev] = value
- cx.stack[last].Uint = exist
+ cx.Stack[prev] = value
+ cx.Stack[last].Uint = exist
return nil
}
func opAssetParamsGet(cx *EvalContext) error {
- last := len(cx.stack) - 1 // asset
+ last := len(cx.Stack) - 1 // asset
paramField := AssetParamsField(cx.program[cx.pc+1])
fs, ok := assetParamsFieldSpecByField(paramField)
@@ -4992,7 +4973,7 @@ func opAssetParamsGet(cx *EvalContext) error {
return fmt.Errorf("invalid asset_params_get field %d", paramField)
}
- asset, err := cx.assetReference(cx.stack[last].Uint, true)
+ asset, err := cx.assetReference(cx.Stack[last].Uint, true)
if err != nil {
return err
}
@@ -5008,13 +4989,13 @@ func opAssetParamsGet(cx *EvalContext) error {
}
}
- cx.stack[last] = value
- cx.stack = append(cx.stack, stackValue{Uint: exist})
+ cx.Stack[last] = value
+ cx.Stack = append(cx.Stack, stackValue{Uint: exist})
return nil
}
func opAppParamsGet(cx *EvalContext) error {
- last := len(cx.stack) - 1 // app
+ last := len(cx.Stack) - 1 // app
paramField := AppParamsField(cx.program[cx.pc+1])
fs, ok := appParamsFieldSpecByField(paramField)
@@ -5022,7 +5003,7 @@ func opAppParamsGet(cx *EvalContext) error {
return fmt.Errorf("invalid app_params_get field %d", paramField)
}
- app, err := cx.appReference(cx.stack[last].Uint, true)
+ app, err := cx.appReference(cx.Stack[last].Uint, true)
if err != nil {
return err
}
@@ -5047,15 +5028,15 @@ func opAppParamsGet(cx *EvalContext) error {
}
}
- cx.stack[last] = value
- cx.stack = append(cx.stack, stackValue{Uint: exist})
+ cx.Stack[last] = value
+ cx.Stack = append(cx.Stack, stackValue{Uint: exist})
return nil
}
func opAcctParamsGet(cx *EvalContext) error {
- last := len(cx.stack) - 1 // acct
+ last := len(cx.Stack) - 1 // acct
- addr, _, err := cx.accountReference(cx.stack[last])
+ addr, _, err := cx.accountReference(cx.Stack[last])
if err != nil {
return err
}
@@ -5101,24 +5082,24 @@ func opAcctParamsGet(cx *EvalContext) error {
case AcctTotalBoxBytes:
value.Uint = account.TotalBoxBytes
}
- cx.stack[last] = value
- cx.stack = append(cx.stack, boolToSV(account.MicroAlgos.Raw > 0))
+ cx.Stack[last] = value
+ cx.Stack = append(cx.Stack, boolToSV(account.MicroAlgos.Raw > 0))
return nil
}
func opLog(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
if uint64(len(cx.txn.EvalDelta.Logs)) >= cx.MaxLogCalls {
return fmt.Errorf("too many log calls in program. up to %d is allowed", cx.MaxLogCalls)
}
- log := cx.stack[last]
+ log := cx.Stack[last]
cx.logSize += len(log.Bytes)
if uint64(cx.logSize) > cx.MaxLogSize {
return fmt.Errorf("program logs too large. %d bytes > %d bytes limit", cx.logSize, cx.MaxLogSize)
}
cx.txn.EvalDelta.Logs = append(cx.txn.EvalDelta.Logs, string(log.Bytes))
- cx.stack = cx.stack[:last]
+ cx.Stack = cx.Stack[:last]
return nil
}
@@ -5179,7 +5160,7 @@ func addInnerTxn(cx *EvalContext) error {
return nil
}
-func opTxBegin(cx *EvalContext) error {
+func opItxnBegin(cx *EvalContext) error {
if len(cx.subtxns) > 0 {
return errors.New("itxn_begin without itxn_submit")
}
@@ -5328,8 +5309,7 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
if len(sv.Bytes) > cx.Proto.MaxTxnNoteBytes {
return fmt.Errorf("%s may not exceed %d bytes", fs.field, cx.Proto.MaxTxnNoteBytes)
}
- txn.Note = make([]byte, len(sv.Bytes))
- copy(txn.Note, sv.Bytes)
+ txn.Note = slices.Clone(sv.Bytes)
// GenesisID, GenesisHash unsettable: surely makes no sense
// Group unsettable: Can't make groups from AVM (yet?)
// Lease unsettable: This seems potentially useful.
@@ -5391,14 +5371,8 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
txn.AssetParams.Total, err = sv.uint()
case ConfigAssetDecimals:
var decimals uint64
- decimals, err = sv.uint()
- if err == nil {
- if decimals > uint64(cx.Proto.MaxAssetDecimals) {
- err = fmt.Errorf("too many decimals (%d)", decimals)
- } else {
- txn.AssetParams.Decimals = uint32(decimals)
- }
- }
+ decimals, err = sv.uintMaxed(uint64(cx.Proto.MaxAssetDecimals))
+ txn.AssetParams.Decimals = uint32(decimals)
case ConfigAssetDefaultFrozen:
txn.AssetParams.DefaultFrozen, err = sv.bool()
case ConfigAssetUnitName:
@@ -5449,9 +5423,7 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
if len(txn.ApplicationArgs) >= cx.Proto.MaxAppArgs {
return errors.New("too many application args")
}
- new := make([]byte, len(sv.Bytes))
- copy(new, sv.Bytes)
- txn.ApplicationArgs = append(txn.ApplicationArgs, new)
+ txn.ApplicationArgs = append(txn.ApplicationArgs, slices.Clone(sv.Bytes))
case Accounts:
var new basics.Address
new, err = cx.assignAccount(sv)
@@ -5467,15 +5439,13 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
if len(sv.Bytes) > maxPossible {
return fmt.Errorf("%s may not exceed %d bytes", fs.field, maxPossible)
}
- txn.ApprovalProgram = make([]byte, len(sv.Bytes))
- copy(txn.ApprovalProgram, sv.Bytes)
+ txn.ApprovalProgram = slices.Clone(sv.Bytes)
case ClearStateProgram:
maxPossible := cx.Proto.MaxAppProgramLen * (1 + cx.Proto.MaxExtraAppProgramPages)
if len(sv.Bytes) > maxPossible {
return fmt.Errorf("%s may not exceed %d bytes", fs.field, maxPossible)
}
- txn.ClearStateProgram = make([]byte, len(sv.Bytes))
- copy(txn.ClearStateProgram, sv.Bytes)
+ txn.ClearStateProgram = slices.Clone(sv.Bytes)
case ApprovalProgramPages:
maxPossible := cx.Proto.MaxAppProgramLen * (1 + cx.Proto.MaxExtraAppProgramPages)
txn.ApprovalProgram = append(txn.ApprovalProgram, sv.Bytes...)
@@ -5539,15 +5509,15 @@ func opItxnField(cx *EvalContext) error {
if itx < 0 {
return errors.New("itxn_field without itxn_begin")
}
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
field := TxnField(cx.program[cx.pc+1])
fs, ok := txnFieldSpecByField(field)
if !ok || fs.itxVersion == 0 || fs.itxVersion > cx.version {
return fmt.Errorf("invalid itxn_field %s", field)
}
- sv := cx.stack[last]
+ sv := cx.Stack[last]
err := cx.stackIntoTxnField(sv, &fs, &cx.subtxns[itx].Txn)
- cx.stack = cx.stack[:last] // pop
+ cx.Stack = cx.Stack[:last] // pop
return err
}
@@ -5597,16 +5567,16 @@ func opItxnSubmit(cx *EvalContext) (err error) {
// transaction pool. Namely that any transaction that makes it
// to Perform (which is equivalent to eval.applyTransaction)
// is authorized, and WellFormed.
- err := authorizedSender(cx, cx.subtxns[itx].Txn.Sender)
- if err != nil {
- return err
+ txnErr := authorizedSender(cx, cx.subtxns[itx].Txn.Sender)
+ if txnErr != nil {
+ return txnErr
}
// Recall that WellFormed does not care about individual
// transaction fees because of fee pooling. Checked above.
- err = cx.subtxns[itx].Txn.WellFormed(*cx.Specials, *cx.Proto)
- if err != nil {
- return err
+ txnErr = cx.subtxns[itx].Txn.WellFormed(*cx.Specials, *cx.Proto)
+ if txnErr != nil {
+ return txnErr
}
var calledVersion uint64
@@ -5630,9 +5600,9 @@ func opItxnSubmit(cx *EvalContext) (err error) {
// Set program by txn, approval, or clear state
program := cx.subtxns[itx].Txn.ApprovalProgram
if cx.subtxns[itx].Txn.ApplicationID != 0 {
- app, _, err := cx.Ledger.AppParams(cx.subtxns[itx].Txn.ApplicationID)
- if err != nil {
- return err
+ app, _, paramsErr := cx.Ledger.AppParams(cx.subtxns[itx].Txn.ApplicationID)
+ if paramsErr != nil {
+ return paramsErr
}
program = app.ApprovalProgram
if cx.subtxns[itx].Txn.OnCompletion == transactions.ClearStateOC {
@@ -5656,15 +5626,15 @@ func opItxnSubmit(cx *EvalContext) (err error) {
if cx.subtxns[itx].Txn.OnCompletion == transactions.OptInOC {
csp := cx.subtxns[itx].Txn.ClearStateProgram
if cx.subtxns[itx].Txn.ApplicationID != 0 {
- app, _, err := cx.Ledger.AppParams(cx.subtxns[itx].Txn.ApplicationID)
- if err != nil {
- return err
+ app, _, paramsErr := cx.Ledger.AppParams(cx.subtxns[itx].Txn.ApplicationID)
+ if paramsErr != nil {
+ return paramsErr
}
csp = app.ClearStateProgram
}
- csv, _, err := transactions.ProgramVersion(csp)
- if err != nil {
- return err
+ csv, _, verErr := transactions.ProgramVersion(csp)
+ if verErr != nil {
+ return verErr
}
if csv < cx.Proto.MinInnerApplVersion {
return fmt.Errorf("inner app call opt-in with CSP v%d < v%d",
@@ -5752,19 +5722,19 @@ func (rm rawMessage) ToBeHashed() (protocol.HashID, []byte) {
}
func opVrfVerify(cx *EvalContext) error {
- last := len(cx.stack) - 1 // PK
+ last := len(cx.Stack) - 1 // PK
prev := last - 1 // proof
pprev := prev - 1 // data
- data := rawMessage(cx.stack[pprev].Bytes)
- proofbytes := cx.stack[prev].Bytes
+ data := rawMessage(cx.Stack[pprev].Bytes)
+ proofbytes := cx.Stack[prev].Bytes
var proof crypto.VrfProof
if len(proofbytes) != len(proof) {
return fmt.Errorf("vrf proof wrong size %d != %d", len(proofbytes), len(proof))
}
copy(proof[:], proofbytes[:])
- pubkeybytes := cx.stack[last].Bytes
+ pubkeybytes := cx.Stack[last].Bytes
var pubkey crypto.VrfPubkey
if len(pubkeybytes) != len(pubkey) {
return fmt.Errorf("vrf pubkey wrong size %d != %d", len(pubkeybytes), len(pubkey))
@@ -5787,9 +5757,9 @@ func opVrfVerify(cx *EvalContext) error {
return fmt.Errorf("unsupported vrf_verify standard %s", std)
}
- cx.stack[pprev].Bytes = output[:]
- cx.stack[prev] = boolToSV(verified)
- cx.stack = cx.stack[:last] // pop 1 because we take 3 args and return 2
+ cx.Stack[pprev].Bytes = output[:]
+ cx.Stack[prev] = boolToSV(verified)
+ cx.Stack = cx.Stack[:last] // pop 1 because we take 3 args and return 2
return nil
}
@@ -5813,8 +5783,8 @@ func (cx *EvalContext) availableRound(r uint64) (basics.Round, error) {
}
func opBlock(cx *EvalContext) error {
- last := len(cx.stack) - 1 // round
- round, err := cx.availableRound(cx.stack[last].Uint)
+ last := len(cx.Stack) - 1 // round
+ round, err := cx.availableRound(cx.Stack[last].Uint)
if err != nil {
return err
}
@@ -5831,14 +5801,14 @@ func opBlock(cx *EvalContext) error {
switch fs.field {
case BlkSeed:
- cx.stack[last].Bytes = hdr.Seed[:]
+ cx.Stack[last].Bytes = hdr.Seed[:]
return nil
case BlkTimestamp:
- cx.stack[last].Bytes = nil
+ cx.Stack[last].Bytes = nil
if hdr.TimeStamp < 0 {
return fmt.Errorf("block(%d) timestamp %d < 0", round, hdr.TimeStamp)
}
- cx.stack[last].Uint = uint64(hdr.TimeStamp)
+ cx.Stack[last].Uint = uint64(hdr.TimeStamp)
return nil
default:
return fmt.Errorf("invalid block field %d", fs.field)
@@ -5898,7 +5868,7 @@ func base64padded(encoded []byte) bool {
}
func opBase64Decode(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
encodingField := Base64Encoding(cx.program[cx.pc+1])
fs, ok := base64EncodingSpecByField(encodingField)
if !ok || fs.version > cx.version {
@@ -5909,7 +5879,7 @@ func opBase64Decode(cx *EvalContext) error {
if encodingField == StdEncoding {
encoding = base64.StdEncoding
}
- encoded := cx.stack[last].Bytes
+ encoded := cx.Stack[last].Bytes
if !base64padded(encoded) {
encoding = encoding.WithPadding(base64.NoPadding)
}
@@ -5917,7 +5887,7 @@ func opBase64Decode(cx *EvalContext) error {
if err != nil {
return err
}
- cx.stack[last].Bytes = bytes
+ cx.Stack[last].Bytes = bytes
return nil
}
@@ -5968,9 +5938,9 @@ func parseJSON(jsonText []byte) (map[string]json.RawMessage, error) {
func opJSONRef(cx *EvalContext) error {
// get json key
- last := len(cx.stack) - 1
- key := string(cx.stack[last].Bytes)
- cx.stack = cx.stack[:last] // pop
+ last := len(cx.Stack) - 1
+ key := string(cx.Stack[last].Bytes)
+ cx.Stack = cx.Stack[:last] // pop
expectedType := JSONRefType(cx.program[cx.pc+1])
fs, ok := jsonRefSpecByField(expectedType)
@@ -5979,8 +5949,8 @@ func opJSONRef(cx *EvalContext) error {
}
// parse json text
- last = len(cx.stack) - 1
- parsed, err := parseJSON(cx.stack[last].Bytes)
+ last = len(cx.Stack) - 1
+ parsed, err := parseJSON(cx.Stack[last].Bytes)
if err != nil {
return fmt.Errorf("error while parsing JSON text, %v", err)
}
@@ -5992,7 +5962,7 @@ func opJSONRef(cx *EvalContext) error {
// if the key is not found, first check whether the JSON text is the null value
// by checking whether it is a primitive JSON value. Any other primitive
// (or array) would have thrown an error previously during `parseJSON`.
- isPrimitive, err := isPrimitiveJSON(cx.stack[last].Bytes)
+ isPrimitive, err := isPrimitiveJSON(cx.Stack[last].Bytes)
if err == nil && isPrimitive {
err = fmt.Errorf("invalid json text, only json object is allowed")
}
@@ -6028,6 +5998,6 @@ func opJSONRef(cx *EvalContext) error {
default:
return fmt.Errorf("unsupported json_ref return type %s", expectedType)
}
- cx.stack[last] = stval
+ cx.Stack[last] = stval
return nil
}
diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go
index c72236a87..f33fce350 100644
--- a/data/transactions/logic/evalAppTxn_test.go
+++ b/data/transactions/logic/evalAppTxn_test.go
@@ -112,7 +112,7 @@ func TestFieldTypes(t *testing.T) {
TestApp(t, NoTrack("itxn_begin; byte \"\"; itxn_field AssetSender;"), ep, "not an address")
// can't really tell if it's an addres, so 32 bytes gets further
TestApp(t, "itxn_begin; byte \"01234567890123456789012345678901\"; itxn_field AssetReceiver; int 1",
- ep, "invalid Account reference")
+ ep, "unavailable Account")
// but a b32 string rep is not an account
TestApp(t, NoTrack("itxn_begin; byte \"GAYTEMZUGU3DOOBZGAYTEMZUGU3DOOBZGAYTEMZUGU3DOOBZGAYZIZD42E\"; itxn_field AssetCloseTo;"),
ep, "not an address")
@@ -128,6 +128,71 @@ func TestFieldTypes(t *testing.T) {
TestApp(t, NoTrack("itxn_begin; byte \"pay\"; itxn_field Nonparticipation;"), ep, "not a uint64")
}
+func TestFieldLimits(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ ep, _, _ := MakeSampleEnv()
+
+ intProgram := "itxn_begin; int %d; itxn_field %s; int 1"
+ goodInt := func(field string, value interface{}) {
+ TestApp(t, fmt.Sprintf(intProgram, value, field), ep)
+ }
+ badInt := func(field string, value interface{}) {
+ // error messages are different for different fields, just use a space
+ // to indicate there should be an error, it will surely match any error.
+ TestApp(t, NoTrack(fmt.Sprintf(intProgram, value, field)), ep, " ")
+ }
+ testInt := func(field string, max int) {
+ goodInt(field, 1)
+ goodInt(field, max)
+ badInt(field, max+1)
+ }
+ testBool := func(field string) {
+ goodInt(field, 0)
+ goodInt(field, 1)
+ badInt(field, 2)
+ }
+ bytesProgram := "itxn_begin; byte %#v; itxn_field %s; int 1"
+ goodBytes := func(field string, value string) {
+ TestApp(t, fmt.Sprintf(bytesProgram, value, field), ep)
+ }
+ badBytes := func(field string, value string) {
+ // error messages are different for different fields, just use a space
+ // to indicate there should be an error, it will surely match any error.
+ TestApp(t, NoTrack(fmt.Sprintf(bytesProgram, value, field)), ep, " ")
+ }
+ testBytes := func(field string, maxLen int) {
+ goodBytes(field, "")
+ goodBytes(field, strings.Repeat("a", maxLen))
+ badBytes(field, strings.Repeat("a", maxLen+1))
+ }
+
+ // header
+ badInt("TypeEnum", 0)
+ testInt("TypeEnum", len(TxnTypeNames)-1)
+ //keyreg
+ testBool("Nonparticipation")
+ //acfg
+ goodInt("ConfigAssetTotal", 1)
+ goodInt("ConfigAssetTotal", uint64(1<<63))
+ goodInt("ConfigAssetDecimals", 0)
+ testInt("ConfigAssetDecimals", int(ep.Proto.MaxAssetDecimals))
+ testBool("ConfigAssetDefaultFrozen")
+ testBytes("ConfigAssetUnitName", ep.Proto.MaxAssetUnitNameBytes)
+ testBytes("ConfigAssetName", ep.Proto.MaxAssetNameBytes)
+ testBytes("ConfigAssetURL", ep.Proto.MaxAssetURLBytes)
+ //afrz
+ testBool("FreezeAssetFrozen")
+ // appl
+ testInt("OnCompletion", len(OnCompletionNames)-1)
+ testInt("LocalNumUint", int(ep.Proto.MaxLocalSchemaEntries))
+ testInt("LocalNumByteSlice", int(ep.Proto.MaxLocalSchemaEntries))
+ testInt("GlobalNumUint", int(ep.Proto.MaxGlobalSchemaEntries))
+ testInt("GlobalNumByteSlice", int(ep.Proto.MaxGlobalSchemaEntries))
+ testInt("ExtraProgramPages", int(ep.Proto.MaxExtraAppProgramPages))
+}
+
func appAddr(id int) basics.Address {
return basics.AppIndex(id).Address()
}
@@ -598,18 +663,20 @@ func TestAssetCreate(t *testing.T) {
create := `
itxn_begin
- int acfg
- itxn_field TypeEnum
- int 1000000
- itxn_field ConfigAssetTotal
- int 3
- itxn_field ConfigAssetDecimals
- byte "oz"
- itxn_field ConfigAssetUnitName
- byte "Gold"
- itxn_field ConfigAssetName
- byte "https://gold.rush/"
- itxn_field ConfigAssetURL
+ int acfg; itxn_field TypeEnum
+ int 1000000; itxn_field ConfigAssetTotal
+ int 3; itxn_field ConfigAssetDecimals
+ byte "oz"; itxn_field ConfigAssetUnitName
+ byte "Gold"; itxn_field ConfigAssetName
+ byte "https://gold.rush/"; itxn_field ConfigAssetURL
+
+ // set all the addresses to something checkable
+ byte 0x01; int 31; bzero; concat; itxn_field ConfigAssetManager;
+ byte 0x02; int 31; bzero; concat; itxn_field ConfigAssetClawback;
+ byte 0x03; int 31; bzero; concat; itxn_field ConfigAssetFreeze;
+ byte 0x04; int 31; bzero; concat; itxn_field ConfigAssetReserve;
+
+ byte 0x05; int 31; bzero; concat; itxn_field ConfigAssetMetadataHash;
itxn_submit
int 1
`
@@ -620,6 +687,15 @@ func TestAssetCreate(t *testing.T) {
// Give it enough for fee. Recall that we don't check min balance at this level.
ledger.NewAccount(appAddr(888), MakeTestProto().MinTxnFee)
TestApp(t, create, ep)
+ assetID := basics.AssetIndex(ledger.Counter() - 1)
+ app, _, err := ledger.AssetParams(assetID)
+ require.NoError(t, err)
+ require.Equal(t, app.Manager, basics.Address{0x01})
+ require.Equal(t, app.Clawback, basics.Address{0x02})
+ require.Equal(t, app.Freeze, basics.Address{0x03})
+ require.Equal(t, app.Reserve, basics.Address{0x04})
+
+ require.Equal(t, app.MetadataHash, [32]byte{0x05})
})
}
@@ -889,6 +965,10 @@ txn Sender; itxn_field Receiver;
// NewAccount overwrites the existing balance
ledger.NewAccount(appAddr(888), 1000+2*MakeTestProto().MinTxnFee)
TestApp(t, "itxn_begin"+pay+"itxn_next"+pay+"itxn_submit; int 1", ep)
+ TestApp(t, "itxn_begin; itxn_begin"+pay+"itxn_next"+pay+"itxn_submit; int 1", ep,
+ "itxn_begin without itxn_submit")
+ TestApp(t, "itxn_next"+pay+"itxn_next"+pay+"itxn_submit; int 1", ep,
+ "itxn_next without itxn_begin")
}
func TestInnerFeePooling(t *testing.T) {
@@ -1000,7 +1080,7 @@ func TestApplCreation(t *testing.T) {
"too many application args")
TestApp(t, p+strings.Repeat("int 32; bzero; itxn_field Accounts;", 3)+s, ep,
- "invalid Account reference")
+ "unavailable Account")
tx.Accounts = append(tx.Accounts, basics.Address{})
TestApp(t, fmt.Sprintf(p+"%s"+s,
strings.Repeat("int 32; bzero; itxn_field Accounts;", 3)), ep)
@@ -3096,7 +3176,7 @@ func TestForeignAppAccountAccess(t *testing.T) {
// app address available starting with 7
var problem []string
if v < 7 {
- problem = []string{"invalid Account reference " + appAddr(111).String()}
+ problem = []string{"unavailable Account " + appAddr(111).String()}
}
TestApp(t, `
diff --git a/data/transactions/logic/evalCrypto_test.go b/data/transactions/logic/evalCrypto_test.go
index 65f0787a9..766fe66ba 100644
--- a/data/transactions/logic/evalCrypto_test.go
+++ b/data/transactions/logic/evalCrypto_test.go
@@ -31,6 +31,7 @@ import (
"github.com/consensys/gnark-crypto/ecc/bn254"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/secp256k1"
@@ -331,8 +332,7 @@ load 0
byte 0x%s
==
&&`
- pkTampered1 := make([]byte, len(pk))
- copy(pkTampered1, pk)
+ pkTampered1 := slices.Clone(pk)
pkTampered1[0] = 0 // first byte is a prefix of either 0x02 or 0x03
pkTampered2 := make([]byte, len(pk)-1) // must be 33 bytes length
copy(pkTampered2, pk)
@@ -378,8 +378,7 @@ ecdsa_verify Secp256k1
s := sign[32:64]
v := int(sign[64])
- rTampered := make([]byte, len(r))
- copy(rTampered, r)
+ rTampered := slices.Clone(r)
rTampered[0] += byte(1) // intentional overflow
var verifyTests = []struct {
@@ -487,8 +486,7 @@ load 0
byte 0x%s
==
&&`
- pkTampered1 := make([]byte, len(pk))
- copy(pkTampered1, pk)
+ pkTampered1 := slices.Clone(pk)
pkTampered1[0] = 0 // first byte is a prefix of either 0x02 or 0x03
pkTampered2 := make([]byte, len(pk)-1) // must be 33 bytes length
copy(pkTampered2, pk)
@@ -533,8 +531,7 @@ ecdsa_verify Secp256r1
r := ri.Bytes()
s := si.Bytes()
- rTampered := make([]byte, len(r))
- copy(rTampered, r)
+ rTampered := slices.Clone(r)
rTampered[0] += byte(1) // intentional overflow
var verifyTests = []struct {
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index 90e0c3863..17759ef67 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -19,6 +19,7 @@ package logic
import (
"encoding/hex"
"fmt"
+ "strconv"
"strings"
"testing"
@@ -56,6 +57,10 @@ func makeSampleEnvWithVersion(version uint64) (*EvalParams, *transactions.Transa
if version >= appsEnabledVersion {
firstTxn.Txn.Type = protocol.ApplicationCallTx
}
+ // avoid putting in a RekeyTo field if version < rekeyingEnabledVersion
+ if version < rekeyingEnabledVersion {
+ firstTxn.Txn.RekeyTo = basics.Address{}
+ }
ep := defaultEvalParamsWithVersion(version, makeSampleTxnGroup(firstTxn)...)
ledger := NewLedger(nil)
ep.SigLedger = ledger
@@ -2844,11 +2849,11 @@ func TestReturnTypes(t *testing.T) {
ep.ioBudget = 50
cx := EvalContext{
- EvalParams: ep,
- runModeFlags: m,
- groupIndex: 1,
- txn: &ep.TxnGroup[1],
- appID: 300,
+ EvalParams: ep,
+ runMode: m,
+ groupIndex: 1,
+ txn: &ep.TxnGroup[1],
+ appID: 300,
}
// These set conditions for some ops that examine the group.
@@ -2869,9 +2874,9 @@ func TestReturnTypes(t *testing.T) {
require.NoError(t, err, "%s: %s\n%s", name, err, ep.Trace)
}
}
- require.Len(t, cx.stack, len(spec.Return.Types), "%s", ep.Trace)
+ require.Len(t, cx.Stack, len(spec.Return.Types), "%s", ep.Trace)
for i := 0; i < len(spec.Return.Types); i++ {
- stackType := cx.stack[i].stackType()
+ stackType := cx.Stack[i].stackType()
retType := spec.Return.Types[i]
require.True(
t, stackType.overlaps(retType),
@@ -3267,3 +3272,23 @@ itxn_submit
testApp(t, source, ep, "too many inner transactions 1 with 0 left")
})
}
+
+func TestTxnaLimits(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ // txna came in v2, but Apps and Assets in v3.
+ TestLogicRange(t, 3, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ testApp(t, "txna Accounts "+strconv.Itoa(len(tx.Accounts))+";len", ep)
+ testApp(t, "txna Accounts "+strconv.Itoa(len(tx.Accounts)+1)+";len", ep, "invalid Accounts index")
+
+ testApp(t, "txna Applications "+strconv.Itoa(len(tx.ForeignApps)), ep)
+ testApp(t, "txna Applications "+strconv.Itoa(len(tx.ForeignApps)+1), ep, "invalid Applications index")
+
+ // Assets and AppArgs have no implicit 0 index, so everything shifts
+ testApp(t, "txna Assets "+strconv.Itoa(len(tx.ForeignAssets)-1), ep)
+ testApp(t, "txna Assets "+strconv.Itoa(len(tx.ForeignAssets)), ep, "invalid Assets index")
+
+ testApp(t, "txna ApplicationArgs "+strconv.Itoa(len(tx.ApplicationArgs)-1)+";len", ep)
+ testApp(t, "txna ApplicationArgs "+strconv.Itoa(len(tx.ApplicationArgs))+";len", ep, "invalid ApplicationArgs index")
+ })
+}
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index de99cab3a..b10c022ef 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -37,6 +37,8 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+
+ "pgregory.net/rapid"
)
// Note that most of the tests use makeTestProto/defaultEvalParams as evaluator version so that
@@ -371,6 +373,33 @@ func TestSimpleMath(t *testing.T) {
testPanics(t, "int 1; int 2; - ; int 0; ==", 1)
}
+// TestRapidMath uses rapid.Check to be a bit more exhaustive
+func TestRapidMath(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ rapid.Check(t, func(r *rapid.T) {
+ a := rapid.Uint64().Draw(r, "a")
+ b := rapid.Uint64().Draw(r, "b")
+ sum := a + b
+ test := fmt.Sprintf("int %d; int %d; +; int %d; ==", a, b, sum)
+ if sum < a {
+ testPanics(t, test, 1)
+ } else {
+ testAccepts(t, test, 1)
+ }
+
+ diff := a - b
+ test = fmt.Sprintf("int %d; int %d; -; int %d; ==", a, b, diff)
+ if a < b {
+ testPanics(t, test, 1)
+ } else {
+ testAccepts(t, test, 1)
+ }
+
+ })
+}
+
func TestSha256EqArg(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -950,16 +979,16 @@ func TestIntcTooFar(t *testing.T) {
t.Parallel()
// Want to be super clear that intc_1 fails, whether an intcblock exists (but small) or not
- testPanics(t, "intc_1", 1)
- testPanics(t, "int 1; intc_1; pop", 1)
+ testPanics(t, "intc_1", 1, "intc 1 beyond 0 constants")
+ testPanics(t, "intcblock 7; intc_1; pop", 1, "intc 1 beyond 1 constants")
}
func TestBytecTooFar(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testPanics(t, "bytec_1; btoi", 1)
- testPanics(t, "byte 0x23; bytec_1; btoi", 1)
+ testPanics(t, "bytec_1; btoi", 1, "bytec 1 beyond 0 constants")
+ testPanics(t, "bytecblock 0x23 0x45; bytec_2; btoi", 1, "bytec 2 beyond 2 constants")
}
func TestManualCBlockEval(t *testing.T) {
@@ -967,7 +996,7 @@ func TestManualCBlockEval(t *testing.T) {
t.Parallel()
// TestManualCBlock in assembler_test.go demonstrates that these will use
- // an inserted constant block.
+ // an inserted constant block because the blocks given are in dead code.
testAccepts(t, "int 4; int 4; +; int 8; ==; return; intcblock 10", 2)
testAccepts(t, "b skip; intcblock 10; skip: int 4; int 4; +; int 8; ==;", 2)
testAccepts(t, "byte 0x2222; byte 0x2222; concat; len; int 4; ==; return; bytecblock 0x11", 2)
diff --git a/data/transactions/logic/frames.go b/data/transactions/logic/frames.go
index 4eda0e933..07a8bf665 100644
--- a/data/transactions/logic/frames.go
+++ b/data/transactions/logic/frames.go
@@ -27,8 +27,8 @@ func opProto(cx *EvalContext) error {
}
cx.fromCallsub = false
nargs := int(cx.program[cx.pc+1])
- if nargs > len(cx.stack) {
- return fmt.Errorf("callsub to proto that requires %d args with stack height %d", nargs, len(cx.stack))
+ if nargs > len(cx.Stack) {
+ return fmt.Errorf("callsub to proto that requires %d args with stack height %d", nargs, len(cx.Stack))
}
top := len(cx.callstack) - 1
cx.callstack[top].clear = true
@@ -45,24 +45,24 @@ func opFrameDig(cx *EvalContext) error {
return errors.New("frame_dig with empty callstack")
}
- frame := cx.callstack[top]
+ topFrame := cx.callstack[top]
// If proto was used, don't allow `frame_dig` to go below specified args
- if frame.clear && -int(i) > frame.args {
- return fmt.Errorf("frame_dig %d in sub with %d args", i, frame.args)
+ if topFrame.clear && -int(i) > topFrame.args {
+ return fmt.Errorf("frame_dig %d in sub with %d args", i, topFrame.args)
}
- idx := frame.height + int(i)
- if idx >= len(cx.stack) {
+ idx := topFrame.height + int(i)
+ if idx >= len(cx.Stack) {
return errors.New("frame_dig above stack")
}
if idx < 0 {
return errors.New("frame_dig below stack")
}
- cx.stack = append(cx.stack, cx.stack[idx])
+ cx.Stack = append(cx.Stack, cx.Stack[idx])
return nil
}
func opFrameBury(cx *EvalContext) error {
- last := len(cx.stack) - 1 // value
+ last := len(cx.Stack) - 1 // value
i := int8(cx.program[cx.pc+1])
top := len(cx.callstack) - 1
@@ -70,54 +70,54 @@ func opFrameBury(cx *EvalContext) error {
return errors.New("frame_bury with empty callstack")
}
- frame := cx.callstack[top]
+ topFrame := cx.callstack[top]
// If proto was used, don't allow `frame_bury` to go below specified args
- if frame.clear && -int(i) > frame.args {
- return fmt.Errorf("frame_bury %d in sub with %d args", i, frame.args)
+ if topFrame.clear && -int(i) > topFrame.args {
+ return fmt.Errorf("frame_bury %d in sub with %d args", i, topFrame.args)
}
- idx := frame.height + int(i)
+ idx := topFrame.height + int(i)
if idx >= last {
return errors.New("frame_bury above stack")
}
if idx < 0 {
return errors.New("frame_bury below stack")
}
- cx.stack[idx] = cx.stack[last]
- cx.stack = cx.stack[:last] // pop value
+ cx.Stack[idx] = cx.Stack[last]
+ cx.Stack = cx.Stack[:last] // pop value
return nil
}
func opBury(cx *EvalContext) error {
- last := len(cx.stack) - 1 // value
+ last := len(cx.Stack) - 1 // value
i := int(cx.program[cx.pc+1])
idx := last - i
if idx < 0 || idx == last {
return errors.New("bury outside stack")
}
- cx.stack[idx] = cx.stack[last]
- cx.stack = cx.stack[:last] // pop value
+ cx.Stack[idx] = cx.Stack[last]
+ cx.Stack = cx.Stack[:last] // pop value
return nil
}
func opPopN(cx *EvalContext) error {
n := cx.program[cx.pc+1]
- top := len(cx.stack) - int(n)
+ top := len(cx.Stack) - int(n)
if top < 0 {
- return fmt.Errorf("popn %d while stack contains %d", n, len(cx.stack))
+ return fmt.Errorf("popn %d while stack contains %d", n, len(cx.Stack))
}
- cx.stack = cx.stack[:top] // pop value
+ cx.Stack = cx.Stack[:top] // pop value
return nil
}
func opDupN(cx *EvalContext) error {
- last := len(cx.stack) - 1 // value
+ last := len(cx.Stack) - 1 // value
n := int(cx.program[cx.pc+1])
- finalLen := len(cx.stack) + n
+ finalLen := len(cx.Stack) + n
cx.ensureStackCap(finalLen)
for i := 0; i < n; i++ {
// There will be enough room that this will not allocate
- cx.stack = append(cx.stack, cx.stack[last])
+ cx.Stack = append(cx.Stack, cx.Stack[last])
}
return nil
}
diff --git a/data/transactions/logic/mocktracer/scenarios.go b/data/transactions/logic/mocktracer/scenarios.go
index 0b336db50..37f8ba790 100644
--- a/data/transactions/logic/mocktracer/scenarios.go
+++ b/data/transactions/logic/mocktracer/scenarios.go
@@ -341,7 +341,7 @@ type TestScenarioGenerator func(info TestScenarioInfo) TestScenario
// scenarios are all app calls which invoke inner transactions under various failure conditions.
// The scenarios follow this format:
//
-// 1. An app call transaction that spawns inners. They are:
+// 1. An app call transaction that spawns inners. They are:
// a. A basic app call transaction
// b. A payment transaction [grouped with c]
// c. A payment transaction [grouped with b]
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index 90c52f086..f0454be6b 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -21,6 +21,8 @@ import (
"sort"
"strconv"
"strings"
+
+ "golang.org/x/exp/maps"
)
// LogicVersion defines default assembler and max eval versions
@@ -359,11 +361,145 @@ type typedList struct {
Effects string
}
+// debugStackExplain explains the effect of an opcode over the stack
+// with 2 integers: deletions and additions, representing pops and inserts.
+// An opcode may delete a few variables from stack, then add a few to stack.
+type debugStackExplain func(*EvalContext) (int, int)
+
// Proto describes the "stack behavior" of an opcode, what it pops as arguments
// and pushes onto the stack as return values.
type Proto struct {
Arg typedList // what gets popped from the stack
Return typedList // what gets pushed to the stack
+
+ // Explain is the pointer to the function used in debugging process during simulation:
+ // - on default construction, Explain relies on Arg and Return count.
+ // - otherwise, we need to explicitly infer from EvalContext, by registering through explain function
+ Explain debugStackExplain
+}
+
+func (p Proto) stackExplain(e debugStackExplain) Proto {
+ p.Explain = e
+ return p
+}
+
+func defaultDebugExplain(argCount, retCount int) debugStackExplain {
+ return func(_ *EvalContext) (deletions, additions int) {
+ deletions = argCount
+ additions = retCount
+ return
+ }
+}
+
+func opPushIntsStackChange(cx *EvalContext) (deletions, additions int) {
+	// NOTE: the parse error is deliberately ignored here;
+	// an invalid immediate would already have failed at assembly time.
+ intc, _, _ := parseIntImmArgs(cx.program, cx.pc+1)
+
+ additions = len(intc)
+ return
+}
+
+func opPushBytessStackChange(cx *EvalContext) (deletions, additions int) {
+	// NOTE: the parse error is deliberately ignored here;
+	// an invalid immediate would already have failed at assembly time.
+ cbytess, _, _ := parseByteImmArgs(cx.program, cx.pc+1)
+
+ additions = len(cbytess)
+ return
+}
+
+func opReturnStackChange(cx *EvalContext) (deletions, additions int) {
+ deletions = len(cx.Stack)
+ additions = 1
+ return
+}
+
+func opBuryStackChange(cx *EvalContext) (deletions, additions int) {
+ depth := int(cx.program[cx.pc+1])
+
+ deletions = depth + 1
+ additions = depth
+ return
+}
+
+func opPopNStackChange(cx *EvalContext) (deletions, additions int) {
+ n := int(cx.program[cx.pc+1])
+
+ deletions = n
+ return
+}
+
+func opDupNStackChange(cx *EvalContext) (deletions, additions int) {
+ n := int(cx.program[cx.pc+1])
+
+ deletions = 1
+ additions = n + 1
+ return
+}
+
+func opDigStackChange(cx *EvalContext) (deletions, additions int) {
+ additions = 1
+ return
+}
+
+func opFrameDigStackChange(cx *EvalContext) (deletions, additions int) {
+ additions = 1
+ return
+}
+
+func opCoverStackChange(cx *EvalContext) (deletions, additions int) {
+ depth := int(cx.program[cx.pc+1])
+
+ deletions = depth + 1
+ additions = depth + 1
+ return
+}
+
+func opUncoverStackChange(cx *EvalContext) (deletions, additions int) {
+ depth := int(cx.program[cx.pc+1])
+
+ deletions = depth + 1
+ additions = depth + 1
+ return
+}
+
+func opRetSubStackChange(cx *EvalContext) (deletions, additions int) {
+ topFrame := cx.callstack[len(cx.callstack)-1]
+ // fast path, no proto case
+ if !topFrame.clear {
+ return
+ }
+
+ argStart := topFrame.height - topFrame.args
+ topStackIdx := len(cx.Stack) - 1
+
+ diff := topStackIdx - argStart + 1
+
+ deletions = diff
+ additions = topFrame.returns
+ return
+}
+
+func opFrameBuryStackChange(cx *EvalContext) (deletions, additions int) {
+ topFrame := cx.callstack[len(cx.callstack)-1]
+
+ immIndex := int8(cx.program[cx.pc+1])
+ idx := topFrame.height + int(immIndex)
+ topStackIdx := len(cx.Stack) - 1
+
+ diff := topStackIdx - idx + 1
+
+ deletions = diff
+ additions = diff - 1
+ return
+}
+
+func opMatchStackChange(cx *EvalContext) (deletions, additions int) {
+ labelNum := int(cx.program[cx.pc+1])
+
+ deletions = labelNum + 1
+ return
}
func proto(signature string, effects ...string) Proto {
@@ -383,9 +519,13 @@ func proto(signature string, effects ...string) Proto {
default:
panic(effects)
}
+ argTypes := parseStackTypes(parts[0])
+ retTypes := parseStackTypes(parts[1])
+ debugExplainFunc := defaultDebugExplain(len(filterNoneTypes(argTypes)), len(filterNoneTypes(retTypes)))
return Proto{
- Arg: typedList{parseStackTypes(parts[0]), argEffect},
- Return: typedList{parseStackTypes(parts[1]), retEffect},
+ Arg: typedList{argTypes, argEffect},
+ Return: typedList{retTypes, retEffect},
+ Explain: debugExplainFunc,
}
}
@@ -518,19 +658,19 @@ var OpSpecs = []OpSpec{
{0x40, "bnz", opBnz, proto("i:"), 1, detBranch()},
{0x41, "bz", opBz, proto("i:"), 2, detBranch()},
{0x42, "b", opB, proto(":"), 2, detBranch()},
- {0x43, "return", opReturn, proto("i:x"), 2, detDefault()},
+ {0x43, "return", opReturn, proto("i:x").stackExplain(opReturnStackChange), 2, detDefault()},
{0x44, "assert", opAssert, proto("i:"), 3, detDefault()},
- {0x45, "bury", opBury, proto("a:"), fpVersion, immediates("n").typed(typeBury)},
- {0x46, "popn", opPopN, proto(":", "[N items]", ""), fpVersion, immediates("n").typed(typePopN).trust()},
- {0x47, "dupn", opDupN, proto("a:", "", "A, [N copies of A]"), fpVersion, immediates("n").typed(typeDupN).trust()},
+ {0x45, "bury", opBury, proto("a:").stackExplain(opBuryStackChange), fpVersion, immediates("n").typed(typeBury)},
+ {0x46, "popn", opPopN, proto(":", "[N items]", "").stackExplain(opPopNStackChange), fpVersion, immediates("n").typed(typePopN).trust()},
+ {0x47, "dupn", opDupN, proto("a:", "", "A, [N copies of A]").stackExplain(opDupNStackChange), fpVersion, immediates("n").typed(typeDupN).trust()},
{0x48, "pop", opPop, proto("a:"), 1, detDefault()},
{0x49, "dup", opDup, proto("a:aa", "A, A"), 1, typed(typeDup)},
{0x4a, "dup2", opDup2, proto("aa:aaaa", "A, B, A, B"), 2, typed(typeDupTwo)},
- {0x4b, "dig", opDig, proto("a:aa", "A, [N items]", "A, [N items], A"), 3, immediates("n").typed(typeDig)},
+ {0x4b, "dig", opDig, proto("a:aa", "A, [N items]", "A, [N items], A").stackExplain(opDigStackChange), 3, immediates("n").typed(typeDig)},
{0x4c, "swap", opSwap, proto("aa:aa", "B, A"), 3, typed(typeSwap)},
{0x4d, "select", opSelect, proto("aai:a", "A or B"), 3, typed(typeSelect)},
- {0x4e, "cover", opCover, proto("a:a", "[N items], A", "A, [N items]"), 5, immediates("n").typed(typeCover)},
- {0x4f, "uncover", opUncover, proto("a:a", "A, [N items]", "[N items], A"), 5, immediates("n").typed(typeUncover)},
+ {0x4e, "cover", opCover, proto("a:a", "[N items], A", "A, [N items]").stackExplain(opCoverStackChange), 5, immediates("n").typed(typeCover)},
+ {0x4f, "uncover", opUncover, proto("a:a", "A, [N items]", "[N items], A").stackExplain(opUncoverStackChange), 5, immediates("n").typed(typeUncover)},
// byteslice processing / StringOps
{0x50, "concat", opConcat, proto("bb:b"), 2, detDefault()},
@@ -578,20 +718,20 @@ var OpSpecs = []OpSpec{
// Immediate bytes and ints. Smaller code size for single use of constant.
{0x80, "pushbytes", opPushBytes, proto(":b"), 3, constants(asmPushBytes, opPushBytes, "bytes", immBytes)},
{0x81, "pushint", opPushInt, proto(":i"), 3, constants(asmPushInt, opPushInt, "uint", immInt)},
- {0x82, "pushbytess", opPushBytess, proto(":", "", "[N items]"), 8, constants(asmPushBytess, checkByteImmArgs, "bytes ...", immBytess).typed(typePushBytess).trust()},
- {0x83, "pushints", opPushInts, proto(":", "", "[N items]"), 8, constants(asmPushInts, checkIntImmArgs, "uint ...", immInts).typed(typePushInts).trust()},
+ {0x82, "pushbytess", opPushBytess, proto(":", "", "[N items]").stackExplain(opPushBytessStackChange), 8, constants(asmPushBytess, checkByteImmArgs, "bytes ...", immBytess).typed(typePushBytess).trust()},
+ {0x83, "pushints", opPushInts, proto(":", "", "[N items]").stackExplain(opPushIntsStackChange), 8, constants(asmPushInts, checkIntImmArgs, "uint ...", immInts).typed(typePushInts).trust()},
{0x84, "ed25519verify_bare", opEd25519VerifyBare, proto("bbb:T"), 7, costly(1900)},
// "Function oriented"
{0x88, "callsub", opCallSub, proto(":"), 4, detBranch()},
- {0x89, "retsub", opRetSub, proto(":"), 4, detDefault().trust()},
+ {0x89, "retsub", opRetSub, proto(":").stackExplain(opRetSubStackChange), 4, detDefault().trust()},
// protoByte is a named constant because opCallSub needs to know it.
{protoByte, "proto", opProto, proto(":"), fpVersion, immediates("a", "r").typed(typeProto)},
- {0x8b, "frame_dig", opFrameDig, proto(":a"), fpVersion, immKinded(immInt8, "i").typed(typeFrameDig)},
- {0x8c, "frame_bury", opFrameBury, proto("a:"), fpVersion, immKinded(immInt8, "i").typed(typeFrameBury)},
+ {0x8b, "frame_dig", opFrameDig, proto(":a").stackExplain(opFrameDigStackChange), fpVersion, immKinded(immInt8, "i").typed(typeFrameDig)},
+ {0x8c, "frame_bury", opFrameBury, proto("a:").stackExplain(opFrameBuryStackChange), fpVersion, immKinded(immInt8, "i").typed(typeFrameBury)},
{0x8d, "switch", opSwitch, proto("i:"), 8, detSwitch()},
- {0x8e, "match", opMatch, proto(":", "[A1, A2, ..., AN], B", ""), 8, detSwitch().trust()},
+ {0x8e, "match", opMatch, proto(":", "[A1, A2, ..., AN], B", "").stackExplain(opMatchStackChange), 8, detSwitch().trust()},
// More math
{0x90, "shl", opShiftLeft, proto("ii:i"), 4, detDefault()},
@@ -631,7 +771,7 @@ var OpSpecs = []OpSpec{
// AVM "effects"
{0xb0, "log", opLog, proto("b:"), 5, only(ModeApp)},
- {0xb1, "itxn_begin", opTxBegin, proto(":"), 5, only(ModeApp)},
+ {0xb1, "itxn_begin", opItxnBegin, proto(":"), 5, only(ModeApp)},
{0xb2, "itxn_field", opItxnField, proto("a:"), 5, immediates("f").typed(typeTxField).field("f", &TxnFields).only(ModeApp).assembler(asmItxnField)},
{0xb3, "itxn_submit", opItxnSubmit, proto(":"), 5, only(ModeApp)},
{0xb4, "itxn", opItxn, proto(":a"), 5, field("f", &TxnScalarFields).only(ModeApp).assembler(asmItxn)},
@@ -707,10 +847,7 @@ func OpcodesByVersion(version uint64) []OpSpec {
}
}
}
- result := make([]OpSpec, 0, len(subv))
- for _, v := range subv {
- result = append(result, v)
- }
+ result := maps.Values(subv)
sort.Sort(sortByOpcode(result))
return result
}
@@ -749,12 +886,8 @@ func init() {
// Start from v2 and higher,
// copy lower version opcodes and overwrite matching version
for v := uint64(2); v <= evalMaxVersion; v++ {
- OpsByName[v] = make(map[string]OpSpec, 256)
-
// Copy opcodes from lower version
- for opName, oi := range OpsByName[v-1] {
- OpsByName[v][opName] = oi
- }
+ OpsByName[v] = maps.Clone(OpsByName[v-1])
for op, oi := range opsByOpcode[v-1] {
opsByOpcode[v][op] = oi
}
diff --git a/data/transactions/logic/opcodes_test.go b/data/transactions/logic/opcodes_test.go
index 2ce3648a6..df69bbf70 100644
--- a/data/transactions/logic/opcodes_test.go
+++ b/data/transactions/logic/opcodes_test.go
@@ -23,6 +23,7 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
)
func TestOpSpecs(t *testing.T) {
@@ -62,11 +63,7 @@ func TestOpcodesByVersionReordered(t *testing.T) { // nolint:paralleltest // man
partitiontest.PartitionTest(t)
// Make a copy to restore to the original
- OpSpecsOrig := make([]OpSpec, len(OpSpecs))
- for idx, opspec := range OpSpecs {
- cp := opspec
- OpSpecsOrig[idx] = cp
- }
+ OpSpecsOrig := slices.Clone(OpSpecs)
defer func() {
OpSpecs = OpSpecsOrig
}()
@@ -88,11 +85,7 @@ func TestOpcodesByVersion(t *testing.T) {
func testOpcodesByVersion(t *testing.T) {
// Make a copy of the OpSpecs to check if OpcodesByVersion will change it
- OpSpecs2 := make([]OpSpec, len(OpSpecs))
- for idx, opspec := range OpSpecs {
- cp := opspec
- OpSpecs2[idx] = cp
- }
+ OpSpecs2 := slices.Clone(OpSpecs)
opSpecs := make([][]OpSpec, LogicVersion)
for v := uint64(1); v <= LogicVersion; v++ {
diff --git a/data/transactions/logic/pairing.go b/data/transactions/logic/pairing.go
index 315caac70..2988c35ec 100644
--- a/data/transactions/logic/pairing.go
+++ b/data/transactions/logic/pairing.go
@@ -65,10 +65,10 @@ func bN254G1ToBytes(g1 *bn254.G1Affine) (ret []byte) {
}
func opBn256Add(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- aBytes := cx.stack[prev].Bytes
- bBytes := cx.stack[last].Bytes
+ aBytes := cx.Stack[prev].Bytes
+ bBytes := cx.Stack[last].Bytes
if len(aBytes) != 64 || len(bBytes) != 64 {
return errors.New("expect G1 in 64 bytes")
}
@@ -76,40 +76,40 @@ func opBn256Add(cx *EvalContext) error {
b := bytesToBN254G1(bBytes)
res := new(bn254.G1Affine).Add(&a, &b)
resBytes := bN254G1ToBytes(res)
- cx.stack = cx.stack[:last]
- cx.stack[prev].Bytes = resBytes
+ cx.Stack = cx.Stack[:last]
+ cx.Stack[prev].Bytes = resBytes
return nil
}
func opBn256ScalarMul(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- aBytes := cx.stack[prev].Bytes
+ aBytes := cx.Stack[prev].Bytes
if len(aBytes) != 64 {
return errors.New("expect G1 in 64 bytes")
}
a := bytesToBN254G1(aBytes)
- kBytes := cx.stack[last].Bytes
+ kBytes := cx.Stack[last].Bytes
k := new(big.Int).SetBytes(kBytes[:])
res := new(bn254.G1Affine).ScalarMultiplication(&a, k)
resBytes := bN254G1ToBytes(res)
- cx.stack = cx.stack[:last]
- cx.stack[prev].Bytes = resBytes
+ cx.Stack = cx.Stack[:last]
+ cx.Stack[prev].Bytes = resBytes
return nil
}
func opBn256Pairing(cx *EvalContext) error {
- last := len(cx.stack) - 1
+ last := len(cx.Stack) - 1
prev := last - 1
- g1Bytes := cx.stack[prev].Bytes
- g2Bytes := cx.stack[last].Bytes
+ g1Bytes := cx.Stack[prev].Bytes
+ g2Bytes := cx.Stack[last].Bytes
g1 := bytesToBN254G1s(g1Bytes)
g2 := bytesToBN254G2s(g2Bytes)
ok, err := bn254.PairingCheck(g1, g2)
if err != nil {
return errors.New("pairing failed")
}
- cx.stack = cx.stack[:last]
- cx.stack[prev] = boolToSV(ok)
+ cx.Stack = cx.Stack[:last]
+ cx.Stack[prev] = boolToSV(ok)
return nil
}
diff --git a/data/transactions/logic/resources_test.go b/data/transactions/logic/resources_test.go
index d0112d9ac..bd5e5e17c 100644
--- a/data/transactions/logic/resources_test.go
+++ b/data/transactions/logic/resources_test.go
@@ -347,7 +347,7 @@ int 1`
ledger.NewAccount(appAddr(888), 50_000)
// First show that we're not just letting anything get passed in
logic.TestApp(t, fmt.Sprintf(callWithAccount, "int 32; bzero; byte 0x07; b|"), ep,
- "invalid Account reference AAAAA")
+ "unavailable Account AAAAA")
// Now show we can pass our own address
logic.TestApp(t, fmt.Sprintf(callWithAccount, "global CurrentApplicationAddress"), ep)
// Or the address of one of our ForeignApps
@@ -434,7 +434,7 @@ func TestOtherTxSharing(t *testing.T) {
pop; pop; int 1
`
- t.Run("keyreg", func(t *testing.T) {
+ t.Run("keyreg", func(t *testing.T) { // nolint:paralleltest // shares `ledger`
appl.ApplicationArgs = [][]byte{senderAcct[:], {200}}
logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&keyreg, &appl), 9, ledger,
logic.Exp(1, "unavailable Asset 200"))
@@ -443,7 +443,7 @@ func TestOtherTxSharing(t *testing.T) {
logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&keyreg, &withRef), 9, ledger,
logic.Exp(1, "unavailable Holding "+senderAcct.String()))
})
- t.Run("pay", func(t *testing.T) {
+ t.Run("pay", func(t *testing.T) { // nolint:paralleltest // shares `ledger`
// The receiver is available for algo balance reading
appl.ApplicationArgs = [][]byte{receiverAcct[:]}
logic.TestApps(t, []string{"", receiverBalance}, txntest.Group(&pay, &appl), 9, ledger)
@@ -459,14 +459,14 @@ func TestOtherTxSharing(t *testing.T) {
logic.TestApps(t, []string{"", otherBalance}, txntest.Group(&withClose, &appl), 9, ledger)
})
- t.Run("acfg", func(t *testing.T) {
+ t.Run("acfg", func(t *testing.T) { // nolint:paralleltest // shares `ledger`
// The other account is not available even though it's all the extra addresses
appl.ApplicationArgs = [][]byte{otherAcct[:]}
logic.TestApps(t, []string{"", otherBalance}, txntest.Group(&acfg, &appl), 9, ledger,
logic.Exp(1, "invalid Account reference "+otherAcct.String()))
})
- t.Run("axfer", func(t *testing.T) {
+ t.Run("axfer", func(t *testing.T) { // nolint:paralleltest // shares `ledger`
// The receiver is also available for algo balance reading
appl.ApplicationArgs = [][]byte{receiverAcct[:]}
logic.TestApps(t, []string{"", receiverBalance}, txntest.Group(&axfer, &appl), 9, ledger)
@@ -501,7 +501,7 @@ func TestOtherTxSharing(t *testing.T) {
logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&withClose, &appl), 9, ledger)
})
- t.Run("afrz", func(t *testing.T) {
+ t.Run("afrz", func(t *testing.T) { // nolint:paralleltest // shares `ledger`
// The other account is available (for algo and asset)
appl.ApplicationArgs = [][]byte{otherAcct[:], {byte(afrz.FreezeAsset)}}
logic.TestApps(t, []string{"", otherBalance}, txntest.Group(&afrz, &appl), 9, ledger)
@@ -576,7 +576,7 @@ int 1
// And needs some ASAs for inner axfer testing
ledger.NewHolding(appAcct, asa1, 1_000_000, false)
- t.Run("keyreg", func(t *testing.T) {
+ t.Run("keyreg", func(t *testing.T) { // nolint:paralleltest // shares `ledger`
keyreg := txntest.Txn{
Type: protocol.KeyRegistrationTx,
Sender: senderAcct,
@@ -586,15 +586,15 @@ int 1
appl.ApplicationArgs = [][]byte{senderAcct[:]}
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&keyreg, &appl), 9, ledger)
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&keyreg, &appl), 8, ledger,
- logic.Exp(1, "invalid Account reference "+senderAcct.String()))
+ logic.Exp(1, "unavailable Account "+senderAcct.String()))
// confirm you can't just pay _anybody_. receiverAcct is not in use at all.
appl.ApplicationArgs = [][]byte{receiverAcct[:]}
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&keyreg, &appl), 9, ledger,
- logic.Exp(1, "invalid Account reference "+receiverAcct.String()))
+ logic.Exp(1, "unavailable Account "+receiverAcct.String()))
})
- t.Run("pay", func(t *testing.T) {
+ t.Run("pay", func(t *testing.T) { // nolint:paralleltest // shares `ledger`
pay := txntest.Txn{
Type: protocol.PaymentTx,
Sender: senderAcct,
@@ -605,20 +605,20 @@ int 1
appl.ApplicationArgs = [][]byte{senderAcct[:]}
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 9, ledger)
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 8, ledger,
- logic.Exp(1, "invalid Account reference "+senderAcct.String()))
+ logic.Exp(1, "unavailable Account "+senderAcct.String()))
appl.ApplicationArgs = [][]byte{receiverAcct[:]}
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 9, ledger)
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 8, ledger,
- logic.Exp(1, "invalid Account reference "+receiverAcct.String()))
+ logic.Exp(1, "unavailable Account "+receiverAcct.String()))
// confirm you can't just pay _anybody_. otherAcct is not in use at all.
appl.ApplicationArgs = [][]byte{otherAcct[:]}
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 9, ledger,
- logic.Exp(1, "invalid Account reference "+otherAcct.String()))
+ logic.Exp(1, "unavailable Account "+otherAcct.String()))
})
- t.Run("axfer", func(t *testing.T) {
+ t.Run("axfer", func(t *testing.T) { // nolint:paralleltest // shares `ledger`
axfer := txntest.Txn{
Type: protocol.AssetTransferTx,
XferAsset: asa1,
@@ -631,7 +631,7 @@ int 1
appl.ApplicationArgs = [][]byte{senderAcct[:], {asa1}}
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&axfer, &appl), 9, ledger)
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&axfer, &appl), 8, ledger,
- logic.Exp(1, "invalid Account reference "+senderAcct.String()))
+ logic.Exp(1, "unavailable Account "+senderAcct.String()))
// but can't axfer to sender, because appAcct doesn't have holding access for the asa
logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&axfer, &appl), 9, ledger,
logic.Exp(1, "unavailable Holding"))
@@ -662,7 +662,7 @@ int 1
// or correct asset to an unknown address
appl.ApplicationArgs = [][]byte{unusedAcct[:], {asa1}}
logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&axfer, &appl), 9, ledger,
- logic.Exp(1, "invalid Account reference"))
+ logic.Exp(1, "unavailable Account"))
// appl can acfg the asset from tx0 (which requires asset available, not holding)
appl.ApplicationArgs = [][]byte{{asa1}}
@@ -694,7 +694,7 @@ int 1
logic.Exp(2, "unavailable Holding "+payAcct.String()))
})
- t.Run("afrz", func(t *testing.T) {
+ t.Run("afrz", func(t *testing.T) { // nolint:paralleltest // shares `ledger`
appl.ForeignAssets = []basics.AssetIndex{} // reset after previous tests
afrz := txntest.Txn{
Type: protocol.AssetFreezeTx,
@@ -722,9 +722,9 @@ int 1
// and not to the receiver which isn't in afrz
appl.ApplicationArgs = [][]byte{receiverAcct[:], {asa1}}
logic.TestApps(t, []string{payToArg}, txntest.Group(&appl, &afrz), 9, ledger,
- logic.Exp(0, "invalid Account reference "+receiverAcct.String()))
+ logic.Exp(0, "unavailable Account "+receiverAcct.String()))
logic.TestApps(t, []string{axferToArgs}, txntest.Group(&appl, &afrz), 9, ledger,
- logic.Exp(0, "invalid Account reference "+receiverAcct.String()))
+ logic.Exp(0, "unavailable Account "+receiverAcct.String()))
// otherAcct is the afrz target, it's holding and account are available
appl.ApplicationArgs = [][]byte{otherAcct[:], {asa1}}
@@ -750,7 +750,7 @@ int 1
})
- t.Run("appl", func(t *testing.T) {
+ t.Run("appl", func(t *testing.T) { // nolint:paralleltest // shares `ledger`
appl.ForeignAssets = []basics.AssetIndex{} // reset after previous test
appl.Accounts = []basics.Address{} // reset after previous tests
appl0 := txntest.Txn{
@@ -764,7 +764,7 @@ int 1
appl.ApplicationArgs = [][]byte{otherAcct[:], {asa1}}
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&appl0, &appl), 9, ledger)
logic.TestApps(t, []string{"", payToArg}, txntest.Group(&appl0, &appl), 8, ledger, // version 8 does not get sharing
- logic.Exp(1, "invalid Account reference "+otherAcct.String()))
+ logic.Exp(1, "unavailable Account "+otherAcct.String()))
// appl can (almost) axfer asa1 to the otherAcct because both are in tx0
logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&appl0, &appl), 9, ledger,
logic.Exp(1, "axfer Sender: unavailable Holding"))
@@ -817,7 +817,7 @@ int 1
// when the inner program is v8, it can't perform the pay
appl.ApplicationArgs = [][]byte{{88}, otherAcct[:], {asa1}}
logic.TestApps(t, []string{"", innerCall}, txntest.Group(&appl0, &appl), 9, ledger,
- logic.Exp(1, "invalid Account reference "+otherAcct.String()))
+ logic.Exp(1, "unavailable Account "+otherAcct.String()))
// unless the caller passes in the account, but it can't pass the
// account because that also would give the called app access to the
// passed account's local state (which isn't available to the caller)
diff --git a/data/transactions/logic/tracer.go b/data/transactions/logic/tracer.go
index 4b4c5f580..5894409ba 100644
--- a/data/transactions/logic/tracer.go
+++ b/data/transactions/logic/tracer.go
@@ -34,87 +34,87 @@ import (
// state will not change between hook calls. This decision was made in an effort to reduce the
// performance impact of tracers.
//
-// LOGICSIG LIFECYCLE GRAPH
-// ┌─────────────────────────┐
-// │ LogicSig Evaluation │
-// ├─────────────────────────┤
-// │ > BeforeProgram │
-// │ │
-// │ ┌───────────────────┐ │
-// │ │ Teal Operation │ │
-// │ ├───────────────────┤ │
-// │ │ > BeforeOpcode │ │
-// │ │ │ │
-// │ │ > AfterOpcode │ │
-// │ └───────────────────┘ │
-// | ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ │
-// │ │
-// │ > AfterProgram │
-// └─────────────────────────┘
+// LOGICSIG LIFECYCLE GRAPH
+// ┌─────────────────────────┐
+// │ LogicSig Evaluation │
+// ├─────────────────────────┤
+// │ > BeforeProgram │
+// │ │
+// │ ┌───────────────────┐ │
+// │ │ Teal Operation │ │
+// │ ├───────────────────┤ │
+// │ │ > BeforeOpcode │ │
+// │ │ │ │
+// │ │ > AfterOpcode │ │
+// │ └───────────────────┘ │
+// | ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ │
+// │ │
+// │ > AfterProgram │
+// └─────────────────────────┘
//
-// APP LIFECYCLE GRAPH
-// ┌──────────────────────────────────────────────────────┐
-// │ Transaction Evaluation │
-// ├──────────────────────────────────────────────────────┤
-// │ > BeforeTxnGroup │
-// │ │
-// │ ┌────────────────────────────────────────────────┐ │
-// │ │ > BeforeTxn │ │
-// │ │ │ │
-// │ │ ┌──────────────────────────────────────────┐ │ │
-// │ │ │ ? App Call │ │ │
-// │ │ ├──────────────────────────────────────────┤ │ │
-// │ │ │ > BeforeProgram │ │ │
-// │ │ │ │ │ │
-// │ │ │ ┌────────────────────────────────────┐ │ │ │
-// │ │ │ │ Teal Operation │ │ │ │
-// │ │ │ ├────────────────────────────────────┤ │ │ │
-// │ │ │ │ > BeforeOpcode │ │ │ │
-// │ │ │ │ ┌──────────────────────────────┐ │ │ │ │
-// │ │ │ │ │ ? Inner Transaction Group │ │ │ │ │
-// │ │ │ │ ├──────────────────────────────┤ │ │ │ │
-// │ │ │ │ │ > BeforeTxnGroup │ │ │ │ │
-// │ │ │ │ │ ┌────────────────────────┐ │ │ │ │ │
-// │ │ │ │ │ │ Transaction Evaluation │ │ │ │ │ │
-// │ │ │ │ │ ├────────────────────────┤ │ │ │ │ │
-// │ │ │ │ │ │ ... │ │ │ │ │ │
-// │ │ │ │ │ └────────────────────────┘ │ │ │ │ │
-// │ │ │ │ │ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ │ │ │ │ │
-// │ │ │ │ │ │ │ │ │ │
-// │ │ │ │ │ > AfterTxnGroup │ │ │ │ │
-// │ │ │ │ └──────────────────────────────┘ │ │ │ │
-// │ │ │ │ > AfterOpcode │ │ │ │
-// │ │ │ └────────────────────────────────────┘ │ │ │
-// │ │ │ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ │ │ │
-// │ │ │ │ │ │
-// │ │ │ > AfterProgram │ │ │
-// │ │ └──────────────────────────────────────────┘ │ │
-// | | ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ │ |
-// │ │ │ │
-// │ │ > AfterTxn │ │
-// │ └────────────────────────────────────────────────┘ │
-// | ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ |
-// │ │
-// │ > AfterTxnGroup │
-// └──────────────────────────────────────────────────────┘
+// APP LIFECYCLE GRAPH
+// ┌──────────────────────────────────────────────────────┐
+// │ Transaction Evaluation │
+// ├──────────────────────────────────────────────────────┤
+// │ > BeforeTxnGroup │
+// │ │
+// │ ┌────────────────────────────────────────────────┐ │
+// │ │ > BeforeTxn │ │
+// │ │ │ │
+// │ │ ┌──────────────────────────────────────────┐ │ │
+// │ │ │ ? App Call │ │ │
+// │ │ ├──────────────────────────────────────────┤ │ │
+// │ │ │ > BeforeProgram │ │ │
+// │ │ │ │ │ │
+// │ │ │ ┌────────────────────────────────────┐ │ │ │
+// │ │ │ │ Teal Operation │ │ │ │
+// │ │ │ ├────────────────────────────────────┤ │ │ │
+// │ │ │ │ > BeforeOpcode │ │ │ │
+// │ │ │ │ ┌──────────────────────────────┐ │ │ │ │
+// │ │ │ │ │ ? Inner Transaction Group │ │ │ │ │
+// │ │ │ │ ├──────────────────────────────┤ │ │ │ │
+// │ │ │ │ │ > BeforeTxnGroup │ │ │ │ │
+// │ │ │ │ │ ┌────────────────────────┐ │ │ │ │ │
+// │ │ │ │ │ │ Transaction Evaluation │ │ │ │ │ │
+// │ │ │ │ │ ├────────────────────────┤ │ │ │ │ │
+// │ │ │ │ │ │ ... │ │ │ │ │ │
+// │ │ │ │ │ └────────────────────────┘ │ │ │ │ │
+// │ │ │ │ │ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ │ │ │ │ │
+// │ │ │ │ │ │ │ │ │ │
+// │ │ │ │ │ > AfterTxnGroup │ │ │ │ │
+// │ │ │ │ └──────────────────────────────┘ │ │ │ │
+// │ │ │ │ > AfterOpcode │ │ │ │
+// │ │ │ └────────────────────────────────────┘ │ │ │
+// │ │ │ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ │ │ │
+// │ │ │ │ │ │
+// │ │ │ > AfterProgram │ │ │
+// │ │ └──────────────────────────────────────────┘ │ │
+// | | ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ │ |
+// │ │ │ │
+// │ │ > AfterTxn │ │
+// │ └────────────────────────────────────────────────┘ │
+// | ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ ⁞ |
+// │ │
+// │ > AfterTxnGroup │
+// └──────────────────────────────────────────────────────┘
//
-// Block Lifecycle Graph
-// ┌──────────────────────────────────────────────────────┐
-// │ Block Evaluation │
-// │ ┌────────────────────────────────────────────────┐ │
-// │ │ > BeforeBlock │ │
-// │ │ │ │
-// │ │ ┌──────────────────────────────────────────┐ │ │
-// │ │ │ > Transaction/LogicSig Lifecycle │ │ │
-// │ │ ├──────────────────────────────────────────┤ │ │
-// │ │ │ ┌────────────────────────────────────┐ │ │ │
-// │ │ │ │ ... │ │ │ │
-// │ │ │ └────────────────────────────────────┘ │ │ │
-// │ │ └──────────────────────────────────────────┘ │ │
-// │ ├────────────────────────────────────────────────│ │
-// │ │ > AfterBlock │ │
-// │ └────────────────────────────────────────────────┘ │
-// └──────────────────────────────────────────────────────┘
+// Block Lifecycle Graph
+// ┌──────────────────────────────────────────────────────┐
+// │ Block Evaluation │
+// │ ┌────────────────────────────────────────────────┐ │
+// │ │ > BeforeBlock │ │
+// │ │ │ │
+// │ │ ┌──────────────────────────────────────────┐ │ │
+// │ │ │ > Transaction/LogicSig Lifecycle │ │ │
+// │ │ ├──────────────────────────────────────────┤ │ │
+// │ │ │ ┌────────────────────────────────────┐ │ │ │
+// │ │ │ │ ... │ │ │ │
+// │ │ │ └────────────────────────────────────┘ │ │ │
+// │ │ └──────────────────────────────────────────┘ │ │
+// │ ├────────────────────────────────────────────────│ │
+// │ │ > AfterBlock │ │
+// │ └────────────────────────────────────────────────┘ │
+// └──────────────────────────────────────────────────────┘
type EvalTracer interface {
// BeforeBlock is called once at the beginning of block evaluation. It is passed the block header.
BeforeBlock(hdr *bookkeeping.BlockHeader)
diff --git a/data/transactions/msgp_gen.go b/data/transactions/msgp_gen.go
index 053330615..1dd4a0930 100644
--- a/data/transactions/msgp_gen.go
+++ b/data/transactions/msgp_gen.go
@@ -9,7 +9,11 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
+ "github.com/algorand/go-algorand/protocol"
)
// The following msgp objects are implemented in this file:
@@ -20,6 +24,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ApplicationCallTxnFieldsMaxSize()
//
// ApplyData
// |-----> (*) MarshalMsg
@@ -28,6 +33,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ApplyDataMaxSize()
//
// AssetConfigTxnFields
// |-----> (*) MarshalMsg
@@ -36,6 +42,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AssetConfigTxnFieldsMaxSize()
//
// AssetFreezeTxnFields
// |-----> (*) MarshalMsg
@@ -44,6 +51,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AssetFreezeTxnFieldsMaxSize()
//
// AssetTransferTxnFields
// |-----> (*) MarshalMsg
@@ -52,6 +60,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AssetTransferTxnFieldsMaxSize()
//
// BoxRef
// |-----> (*) MarshalMsg
@@ -60,6 +69,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> BoxRefMaxSize()
//
// EvalDelta
// |-----> (*) MarshalMsg
@@ -68,6 +78,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> EvalDeltaMaxSize()
//
// Header
// |-----> (*) MarshalMsg
@@ -76,6 +87,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> HeaderMaxSize()
//
// KeyregTxnFields
// |-----> (*) MarshalMsg
@@ -84,6 +96,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> KeyregTxnFieldsMaxSize()
//
// LogicSig
// |-----> (*) MarshalMsg
@@ -92,6 +105,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> LogicSigMaxSize()
//
// OnCompletion
// |-----> MarshalMsg
@@ -100,6 +114,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> OnCompletionMaxSize()
//
// PaymentTxnFields
// |-----> (*) MarshalMsg
@@ -108,6 +123,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> PaymentTxnFieldsMaxSize()
//
// Payset
// |-----> MarshalMsg
@@ -116,6 +132,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> PaysetMaxSize()
//
// SignedTxn
// |-----> (*) MarshalMsg
@@ -124,6 +141,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SignedTxnMaxSize()
//
// SignedTxnInBlock
// |-----> (*) MarshalMsg
@@ -132,6 +150,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SignedTxnInBlockMaxSize()
//
// SignedTxnWithAD
// |-----> (*) MarshalMsg
@@ -140,6 +159,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SignedTxnWithADMaxSize()
//
// StateProofTxnFields
// |-----> (*) MarshalMsg
@@ -148,6 +168,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> StateProofTxnFieldsMaxSize()
//
// Transaction
// |-----> (*) MarshalMsg
@@ -156,6 +177,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> TransactionMaxSize()
//
// TxGroup
// |-----> (*) MarshalMsg
@@ -164,6 +186,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> TxGroupMaxSize()
//
// Txid
// |-----> (*) MarshalMsg
@@ -173,6 +196,9 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// crypto.Digest
+// |-----> crypto.DigestMaxSize()
+//
// MarshalMsg implements msgp.Marshaler
func (z *ApplicationCallTxnFields) MarshalMsg(b []byte) (o []byte) {
@@ -518,6 +544,16 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
if zb0017 > 0 {
zb0017--
+ var zb0019 int
+ zb0019, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "struct-from-array", "Name")
+ return
+ }
+ if zb0019 > config.MaxBytesKeyValueLen {
+ err = msgp.ErrOverflow(uint64(zb0019), uint64(config.MaxBytesKeyValueLen))
+ return
+ }
(*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "struct-from-array", "Name")
@@ -554,6 +590,16 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
case "n":
+ var zb0020 int
+ zb0020, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "Name")
+ return
+ }
+ if zb0020 > config.MaxBytesKeyValueLen {
+ err = msgp.ErrOverflow(uint64(zb0020), uint64(config.MaxBytesKeyValueLen))
+ return
+ }
(*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0004, "Name")
@@ -572,24 +618,24 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
if zb0006 > 0 {
zb0006--
- var zb0019 int
- var zb0020 bool
- zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0021 int
+ var zb0022 bool
+ zb0021, zb0022, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0019 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0019), uint64(encodedMaxForeignAssets))
+ if zb0021 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0021), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0020 {
+ if zb0022 {
(*z).ForeignAssets = nil
- } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0019 {
- (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0019]
+ } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0021 {
+ (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0021]
} else {
- (*z).ForeignAssets = make([]basics.AssetIndex, zb0019)
+ (*z).ForeignAssets = make([]basics.AssetIndex, zb0021)
}
for zb0005 := range (*z).ForeignAssets {
bts, err = (*z).ForeignAssets[zb0005].UnmarshalMsg(bts)
@@ -617,14 +663,14 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
if zb0006 > 0 {
zb0006--
- var zb0021 int
- zb0021, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0023 int
+ zb0023, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
return
}
- if zb0021 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(config.MaxAvailableAppProgramLen))
+ if zb0023 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0023), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram)
@@ -635,14 +681,14 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
if zb0006 > 0 {
zb0006--
- var zb0022 int
- zb0022, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0024 int
+ zb0024, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
return
}
- if zb0022 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(config.MaxAvailableAppProgramLen))
+ if zb0024 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0024), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram)
@@ -690,33 +736,33 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
case "apan":
{
- var zb0023 uint64
- zb0023, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0025 uint64
+ zb0025, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OnCompletion")
return
}
- (*z).OnCompletion = OnCompletion(zb0023)
+ (*z).OnCompletion = OnCompletion(zb0025)
}
case "apaa":
- var zb0024 int
- var zb0025 bool
- zb0024, zb0025, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0026 int
+ var zb0027 bool
+ zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0024 > encodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0024), uint64(encodedMaxApplicationArgs))
+ if zb0026 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0026), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0025 {
+ if zb0027 {
(*z).ApplicationArgs = nil
- } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0024 {
- (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0024]
+ } else if (*z).ApplicationArgs != nil && cap((*z).ApplicationArgs) >= zb0026 {
+ (*z).ApplicationArgs = ((*z).ApplicationArgs)[:zb0026]
} else {
- (*z).ApplicationArgs = make([][]byte, zb0024)
+ (*z).ApplicationArgs = make([][]byte, zb0026)
}
for zb0001 := range (*z).ApplicationArgs {
(*z).ApplicationArgs[zb0001], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationArgs[zb0001])
@@ -726,24 +772,24 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
case "apat":
- var zb0026 int
- var zb0027 bool
- zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0028 int
+ var zb0029 bool
+ zb0028, zb0029, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0026 > encodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0026), uint64(encodedMaxAccounts))
+ if zb0028 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0028), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0027 {
+ if zb0029 {
(*z).Accounts = nil
- } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0026 {
- (*z).Accounts = ((*z).Accounts)[:zb0026]
+ } else if (*z).Accounts != nil && cap((*z).Accounts) >= zb0028 {
+ (*z).Accounts = ((*z).Accounts)[:zb0028]
} else {
- (*z).Accounts = make([]basics.Address, zb0026)
+ (*z).Accounts = make([]basics.Address, zb0028)
}
for zb0002 := range (*z).Accounts {
bts, err = (*z).Accounts[zb0002].UnmarshalMsg(bts)
@@ -753,24 +799,24 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
case "apfa":
- var zb0028 int
- var zb0029 bool
- zb0028, zb0029, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0030 int
+ var zb0031 bool
+ zb0030, zb0031, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0028 > encodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0028), uint64(encodedMaxForeignApps))
+ if zb0030 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0030), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0029 {
+ if zb0031 {
(*z).ForeignApps = nil
- } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0028 {
- (*z).ForeignApps = ((*z).ForeignApps)[:zb0028]
+ } else if (*z).ForeignApps != nil && cap((*z).ForeignApps) >= zb0030 {
+ (*z).ForeignApps = ((*z).ForeignApps)[:zb0030]
} else {
- (*z).ForeignApps = make([]basics.AppIndex, zb0028)
+ (*z).ForeignApps = make([]basics.AppIndex, zb0030)
}
for zb0003 := range (*z).ForeignApps {
bts, err = (*z).ForeignApps[zb0003].UnmarshalMsg(bts)
@@ -780,53 +826,63 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
case "apbx":
- var zb0030 int
- var zb0031 bool
- zb0030, zb0031, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0032 int
+ var zb0033 bool
+ zb0032, zb0033, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Boxes")
return
}
- if zb0030 > encodedMaxBoxes {
- err = msgp.ErrOverflow(uint64(zb0030), uint64(encodedMaxBoxes))
+ if zb0032 > encodedMaxBoxes {
+ err = msgp.ErrOverflow(uint64(zb0032), uint64(encodedMaxBoxes))
err = msgp.WrapError(err, "Boxes")
return
}
- if zb0031 {
+ if zb0033 {
(*z).Boxes = nil
- } else if (*z).Boxes != nil && cap((*z).Boxes) >= zb0030 {
- (*z).Boxes = ((*z).Boxes)[:zb0030]
+ } else if (*z).Boxes != nil && cap((*z).Boxes) >= zb0032 {
+ (*z).Boxes = ((*z).Boxes)[:zb0032]
} else {
- (*z).Boxes = make([]BoxRef, zb0030)
+ (*z).Boxes = make([]BoxRef, zb0032)
}
for zb0004 := range (*z).Boxes {
- var zb0032 int
- var zb0033 bool
- zb0032, zb0033, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0034 int
+ var zb0035 bool
+ zb0034, zb0035, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0032, zb0033, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0034, zb0035, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0004)
return
}
- if zb0032 > 0 {
- zb0032--
+ if zb0034 > 0 {
+ zb0034--
(*z).Boxes[zb0004].Index, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0004, "struct-from-array", "Index")
return
}
}
- if zb0032 > 0 {
- zb0032--
+ if zb0034 > 0 {
+ zb0034--
+ var zb0036 int
+ zb0036, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "struct-from-array", "Name")
+ return
+ }
+ if zb0036 > config.MaxBytesKeyValueLen {
+ err = msgp.ErrOverflow(uint64(zb0036), uint64(config.MaxBytesKeyValueLen))
+ return
+ }
(*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0004, "struct-from-array", "Name")
return
}
}
- if zb0032 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0032)
+ if zb0034 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0034)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0004, "struct-from-array")
return
@@ -837,11 +893,11 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err, "Boxes", zb0004)
return
}
- if zb0033 {
+ if zb0035 {
(*z).Boxes[zb0004] = BoxRef{}
}
- for zb0032 > 0 {
- zb0032--
+ for zb0034 > 0 {
+ zb0034--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0004)
@@ -855,6 +911,16 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
case "n":
+ var zb0037 int
+ zb0037, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0004, "Name")
+ return
+ }
+ if zb0037 > config.MaxBytesKeyValueLen {
+ err = msgp.ErrOverflow(uint64(zb0037), uint64(config.MaxBytesKeyValueLen))
+ return
+ }
(*z).Boxes[zb0004].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Boxes[zb0004].Name)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0004, "Name")
@@ -871,24 +937,24 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
case "apas":
- var zb0034 int
- var zb0035 bool
- zb0034, zb0035, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0038 int
+ var zb0039 bool
+ zb0038, zb0039, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0034 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0034), uint64(encodedMaxForeignAssets))
+ if zb0038 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0038), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0035 {
+ if zb0039 {
(*z).ForeignAssets = nil
- } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0034 {
- (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0034]
+ } else if (*z).ForeignAssets != nil && cap((*z).ForeignAssets) >= zb0038 {
+ (*z).ForeignAssets = ((*z).ForeignAssets)[:zb0038]
} else {
- (*z).ForeignAssets = make([]basics.AssetIndex, zb0034)
+ (*z).ForeignAssets = make([]basics.AssetIndex, zb0038)
}
for zb0005 := range (*z).ForeignAssets {
bts, err = (*z).ForeignAssets[zb0005].UnmarshalMsg(bts)
@@ -910,14 +976,14 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
case "apap":
- var zb0036 int
- zb0036, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0040 int
+ zb0040, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ApprovalProgram")
return
}
- if zb0036 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0036), uint64(config.MaxAvailableAppProgramLen))
+ if zb0040 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0040), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApprovalProgram)
@@ -926,14 +992,14 @@ func (z *ApplicationCallTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error
return
}
case "apsu":
- var zb0037 int
- zb0037, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0041 int
+ zb0041, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ClearStateProgram")
return
}
- if zb0037 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0037), uint64(config.MaxAvailableAppProgramLen))
+ if zb0041 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0041), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ClearStateProgram)
@@ -996,6 +1062,26 @@ func (z *ApplicationCallTxnFields) MsgIsZero() bool {
return ((*z).ApplicationID.MsgIsZero()) && ((*z).OnCompletion == 0) && (len((*z).ApplicationArgs) == 0) && (len((*z).Accounts) == 0) && (len((*z).ForeignApps) == 0) && (len((*z).Boxes) == 0) && (len((*z).ForeignAssets) == 0) && ((*z).LocalStateSchema.MsgIsZero()) && ((*z).GlobalStateSchema.MsgIsZero()) && (len((*z).ApprovalProgram) == 0) && (len((*z).ClearStateProgram) == 0) && ((*z).ExtraProgramPages == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func ApplicationCallTxnFieldsMaxSize() (s int) {
+ s = 1 + 5 + basics.AppIndexMaxSize() + 5 + msgp.Uint64Size + 5
+ // Calculating size of slice: z.ApplicationArgs
+ s += msgp.ArrayHeaderSize + config.MaxAppTotalArgLen + 5
+ // Calculating size of slice: z.Accounts
+ s += msgp.ArrayHeaderSize + ((encodedMaxAccounts) * (basics.AddressMaxSize()))
+ s += 5
+ // Calculating size of slice: z.ForeignApps
+ s += msgp.ArrayHeaderSize + ((encodedMaxForeignApps) * (basics.AppIndexMaxSize()))
+ s += 5
+ // Calculating size of slice: z.Boxes
+ s += msgp.ArrayHeaderSize + ((encodedMaxBoxes) * (BoxRefMaxSize()))
+ s += 5
+ // Calculating size of slice: z.ForeignAssets
+ s += msgp.ArrayHeaderSize + ((encodedMaxForeignAssets) * (basics.AssetIndexMaxSize()))
+ s += 5 + basics.StateSchemaMaxSize() + 5 + basics.StateSchemaMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.Uint32Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *ApplyData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1263,6 +1349,12 @@ func (z *ApplyData) MsgIsZero() bool {
return ((*z).ClosingAmount.MsgIsZero()) && ((*z).AssetClosingAmount == 0) && ((*z).SenderRewards.MsgIsZero()) && ((*z).ReceiverRewards.MsgIsZero()) && ((*z).CloseRewards.MsgIsZero()) && ((*z).EvalDelta.MsgIsZero()) && ((*z).ConfigAsset.MsgIsZero()) && ((*z).ApplicationID.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func ApplyDataMaxSize() (s int) {
+ s = 1 + 3 + basics.MicroAlgosMaxSize() + 4 + msgp.Uint64Size + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + EvalDeltaMaxSize() + 5 + basics.AssetIndexMaxSize() + 5 + basics.AppIndexMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *AssetConfigTxnFields) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1392,6 +1484,12 @@ func (z *AssetConfigTxnFields) MsgIsZero() bool {
return ((*z).ConfigAsset.MsgIsZero()) && ((*z).AssetParams.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func AssetConfigTxnFieldsMaxSize() (s int) {
+ s = 1 + 5 + basics.AssetIndexMaxSize() + 5 + basics.AssetParamsMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *AssetFreezeTxnFields) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1544,6 +1642,12 @@ func (z *AssetFreezeTxnFields) MsgIsZero() bool {
return ((*z).FreezeAccount.MsgIsZero()) && ((*z).FreezeAsset.MsgIsZero()) && ((*z).AssetFrozen == false)
}
+// MaxSize returns a maximum valid message size for this message type
+func AssetFreezeTxnFieldsMaxSize() (s int) {
+ s = 1 + 5 + basics.AddressMaxSize() + 5 + basics.AssetIndexMaxSize() + 5 + msgp.BoolSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *AssetTransferTxnFields) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1742,6 +1846,12 @@ func (z *AssetTransferTxnFields) MsgIsZero() bool {
return ((*z).XferAsset.MsgIsZero()) && ((*z).AssetAmount == 0) && ((*z).AssetSender.MsgIsZero()) && ((*z).AssetReceiver.MsgIsZero()) && ((*z).AssetCloseTo.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func AssetTransferTxnFieldsMaxSize() (s int) {
+ s = 1 + 5 + basics.AssetIndexMaxSize() + 5 + msgp.Uint64Size + 5 + basics.AddressMaxSize() + 5 + basics.AddressMaxSize() + 7 + basics.AddressMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *BoxRef) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1801,6 +1911,16 @@ func (z *BoxRef) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
+ var zb0003 int
+ zb0003, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Name")
+ return
+ }
+ if zb0003 > config.MaxBytesKeyValueLen {
+ err = msgp.ErrOverflow(uint64(zb0003), uint64(config.MaxBytesKeyValueLen))
+ return
+ }
(*z).Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Name)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Name")
@@ -1837,6 +1957,16 @@ func (z *BoxRef) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "n":
+ var zb0004 int
+ zb0004, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Name")
+ return
+ }
+ if zb0004 > config.MaxBytesKeyValueLen {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(config.MaxBytesKeyValueLen))
+ return
+ }
(*z).Name, bts, err = msgp.ReadBytesBytes(bts, (*z).Name)
if err != nil {
err = msgp.WrapError(err, "Name")
@@ -1871,6 +2001,12 @@ func (z *BoxRef) MsgIsZero() bool {
return ((*z).Index == 0) && (len((*z).Name) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func BoxRefMaxSize() (s int) {
+ s = 1 + 2 + msgp.Uint64Size + 2 + msgp.BytesPrefixSize + config.MaxBytesKeyValueLen
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *EvalDelta) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2306,6 +2442,25 @@ func (z *EvalDelta) MsgIsZero() bool {
return ((*z).GlobalDelta.MsgIsZero()) && (len((*z).LocalDeltas) == 0) && (len((*z).SharedAccts) == 0) && (len((*z).Logs) == 0) && (len((*z).InnerTxns) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func EvalDeltaMaxSize() (s int) {
+ s = 1 + 3 + basics.StateDeltaMaxSize() + 3
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.LocalDeltas
+ s += config.MaxEvalDeltaAccounts * (msgp.Uint64Size)
+ // Adding size of map values for z.LocalDeltas
+ s += config.MaxEvalDeltaAccounts * (basics.StateDeltaMaxSize())
+ s += 3
+ // Calculating size of slice: z.SharedAccts
+ s += msgp.ArrayHeaderSize + ((config.MaxEvalDeltaAccounts) * (basics.AddressMaxSize()))
+ s += 3
+ // Calculating size of slice: z.Logs
+ s += msgp.ArrayHeaderSize + (config.MaxLogCalls * msgp.StringPrefixSize) + config.MaxEvalDeltaTotalLogSize + 4
+ // Calculating size of slice: z.InnerTxns
+ s += msgp.ArrayHeaderSize + ((config.MaxInnerTransactionsPerDelta) * (SignedTxnWithADMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Header) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2479,6 +2634,16 @@ func (z *Header) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0002 > 0 {
zb0002--
+ var zb0005 int
+ zb0005, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "GenesisID")
+ return
+ }
+ if zb0005 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
@@ -2565,14 +2730,14 @@ func (z *Header) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "note":
- var zb0005 int
- zb0005, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "Note")
return
}
- if zb0005 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(config.MaxTxnNoteBytes))
+ if zb0006 > config.MaxTxnNoteBytes {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(config.MaxTxnNoteBytes))
return
}
(*z).Note, bts, err = msgp.ReadBytesBytes(bts, (*z).Note)
@@ -2581,6 +2746,16 @@ func (z *Header) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "gen":
+ var zb0007 int
+ zb0007, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "GenesisID")
+ return
+ }
+ if zb0007 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "GenesisID")
@@ -2639,6 +2814,15 @@ func (z *Header) MsgIsZero() bool {
return ((*z).Sender.MsgIsZero()) && ((*z).Fee.MsgIsZero()) && ((*z).FirstValid.MsgIsZero()) && ((*z).LastValid.MsgIsZero()) && (len((*z).Note) == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).Group.MsgIsZero()) && ((*z).Lease == ([32]byte{})) && ((*z).RekeyTo.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func HeaderMaxSize() (s int) {
+ s = 1 + 4 + basics.AddressMaxSize() + 4 + basics.MicroAlgosMaxSize() + 3 + basics.RoundMaxSize() + 3 + basics.RoundMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxTxnNoteBytes + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + crypto.DigestMaxSize() + 3
+ // Calculating size of array: z.Lease
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 6 + basics.AddressMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *KeyregTxnFields) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2883,6 +3067,12 @@ func (z *KeyregTxnFields) MsgIsZero() bool {
return ((*z).VotePK.MsgIsZero()) && ((*z).SelectionPK.MsgIsZero()) && ((*z).StateProofPK.MsgIsZero()) && ((*z).VoteFirst.MsgIsZero()) && ((*z).VoteLast.MsgIsZero()) && ((*z).VoteKeyDilution == 0) && ((*z).Nonparticipation == false)
}
+// MaxSize returns a maximum valid message size for this message type
+func KeyregTxnFieldsMaxSize() (s int) {
+ s = 1 + 8 + crypto.OneTimeSignatureVerifierMaxSize() + 7 + crypto.VRFVerifierMaxSize() + 8 + merklesignature.CommitmentMaxSize() + 8 + basics.RoundMaxSize() + 8 + basics.RoundMaxSize() + 7 + msgp.Uint64Size + 8 + msgp.BoolSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *LogicSig) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3150,6 +3340,14 @@ func (z *LogicSig) MsgIsZero() bool {
return (len((*z).Logic) == 0) && ((*z).Sig.MsgIsZero()) && ((*z).Msig.MsgIsZero()) && (len((*z).Args) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func LogicSigMaxSize() (s int) {
+ s = 1 + 2 + msgp.BytesPrefixSize + config.MaxLogicSigMaxSize + 4 + crypto.SignatureMaxSize() + 5 + crypto.MultisigSigMaxSize() + 4
+ // Calculating size of slice: z.Args
+ s += msgp.ArrayHeaderSize + ((EvalMaxArgs) * (msgp.BytesPrefixSize + config.MaxLogicSigMaxSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z OnCompletion) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3196,6 +3394,12 @@ func (z OnCompletion) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func OnCompletionMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *PaymentTxnFields) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3348,6 +3552,12 @@ func (z *PaymentTxnFields) MsgIsZero() bool {
return ((*z).Receiver.MsgIsZero()) && ((*z).Amount.MsgIsZero()) && ((*z).CloseRemainderTo.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func PaymentTxnFieldsMaxSize() (s int) {
+ s = 1 + 4 + basics.AddressMaxSize() + 4 + basics.MicroAlgosMaxSize() + 6 + basics.AddressMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z Payset) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3421,6 +3631,13 @@ func (z Payset) MsgIsZero() bool {
return len(z) == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func PaysetMaxSize() (s int) {
+ // Calculating size of slice: z
+ s += msgp.ArrayHeaderSize + ((100000) * (SignedTxnInBlockMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *SignedTxn) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -3619,6 +3836,12 @@ func (z *SignedTxn) MsgIsZero() bool {
return ((*z).Sig.MsgIsZero()) && ((*z).Msig.MsgIsZero()) && ((*z).Lsig.MsgIsZero()) && ((*z).Txn.MsgIsZero()) && ((*z).AuthAddr.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func SignedTxnMaxSize() (s int) {
+ s = 1 + 4 + crypto.SignatureMaxSize() + 5 + crypto.MultisigSigMaxSize() + 5 + LogicSigMaxSize() + 4 + TransactionMaxSize() + 5 + basics.AddressMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *SignedTxnInBlock) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4047,6 +4270,12 @@ func (z *SignedTxnInBlock) MsgIsZero() bool {
return ((*z).SignedTxnWithAD.SignedTxn.Sig.MsgIsZero()) && ((*z).SignedTxnWithAD.SignedTxn.Msig.MsgIsZero()) && ((*z).SignedTxnWithAD.SignedTxn.Lsig.MsgIsZero()) && ((*z).SignedTxnWithAD.SignedTxn.Txn.MsgIsZero()) && ((*z).SignedTxnWithAD.SignedTxn.AuthAddr.MsgIsZero()) && ((*z).SignedTxnWithAD.ApplyData.ClosingAmount.MsgIsZero()) && ((*z).SignedTxnWithAD.ApplyData.AssetClosingAmount == 0) && ((*z).SignedTxnWithAD.ApplyData.SenderRewards.MsgIsZero()) && ((*z).SignedTxnWithAD.ApplyData.ReceiverRewards.MsgIsZero()) && ((*z).SignedTxnWithAD.ApplyData.CloseRewards.MsgIsZero()) && ((*z).SignedTxnWithAD.ApplyData.EvalDelta.MsgIsZero()) && ((*z).SignedTxnWithAD.ApplyData.ConfigAsset.MsgIsZero()) && ((*z).SignedTxnWithAD.ApplyData.ApplicationID.MsgIsZero()) && ((*z).HasGenesisID == false) && ((*z).HasGenesisHash == false)
}
+// MaxSize returns a maximum valid message size for this message type
+func SignedTxnInBlockMaxSize() (s int) {
+ s = 1 + 4 + crypto.SignatureMaxSize() + 5 + crypto.MultisigSigMaxSize() + 5 + LogicSigMaxSize() + 4 + TransactionMaxSize() + 5 + basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 4 + msgp.Uint64Size + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + EvalDeltaMaxSize() + 5 + basics.AssetIndexMaxSize() + 5 + basics.AppIndexMaxSize() + 4 + msgp.BoolSize + 4 + msgp.BoolSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *SignedTxnWithAD) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4429,6 +4658,12 @@ func (z *SignedTxnWithAD) MsgIsZero() bool {
return ((*z).SignedTxn.Sig.MsgIsZero()) && ((*z).SignedTxn.Msig.MsgIsZero()) && ((*z).SignedTxn.Lsig.MsgIsZero()) && ((*z).SignedTxn.Txn.MsgIsZero()) && ((*z).SignedTxn.AuthAddr.MsgIsZero()) && ((*z).ApplyData.ClosingAmount.MsgIsZero()) && ((*z).ApplyData.AssetClosingAmount == 0) && ((*z).ApplyData.SenderRewards.MsgIsZero()) && ((*z).ApplyData.ReceiverRewards.MsgIsZero()) && ((*z).ApplyData.CloseRewards.MsgIsZero()) && ((*z).ApplyData.EvalDelta.MsgIsZero()) && ((*z).ApplyData.ConfigAsset.MsgIsZero()) && ((*z).ApplyData.ApplicationID.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func SignedTxnWithADMaxSize() (s int) {
+ s = 1 + 4 + crypto.SignatureMaxSize() + 5 + crypto.MultisigSigMaxSize() + 5 + LogicSigMaxSize() + 4 + TransactionMaxSize() + 5 + basics.AddressMaxSize() + 3 + basics.MicroAlgosMaxSize() + 4 + msgp.Uint64Size + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + basics.MicroAlgosMaxSize() + 3 + EvalDeltaMaxSize() + 5 + basics.AssetIndexMaxSize() + 5 + basics.AppIndexMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *StateProofTxnFields) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -4581,6 +4816,12 @@ func (z *StateProofTxnFields) MsgIsZero() bool {
return ((*z).StateProofType.MsgIsZero()) && ((*z).StateProof.MsgIsZero()) && ((*z).Message.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func StateProofTxnFieldsMaxSize() (s int) {
+ s = 1 + 7 + protocol.StateProofTypeMaxSize() + 3 + stateproof.StateProofMaxSize() + 6 + stateproofmsg.MessageMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Transaction) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -5143,6 +5384,16 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0007 > 0 {
zb0007--
+ var zb0010 int
+ zb0010, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "GenesisID")
+ return
+ }
+ if zb0010 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).Header.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GenesisID")
@@ -5352,35 +5603,35 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
if zb0007 > 0 {
zb0007--
{
- var zb0010 uint64
- zb0010, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0011 uint64
+ zb0011, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "OnCompletion")
return
}
- (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0010)
+ (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0011)
}
}
if zb0007 > 0 {
zb0007--
- var zb0011 int
- var zb0012 bool
- zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0012 int
+ var zb0013 bool
+ zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0011 > encodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(encodedMaxApplicationArgs))
+ if zb0012 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "struct-from-array", "ApplicationArgs")
return
}
- if zb0012 {
+ if zb0013 {
(*z).ApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0011 {
- (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0011]
+ } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0012 {
+ (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0012]
} else {
- (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0011)
+ (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0012)
}
for zb0002 := range (*z).ApplicationCallTxnFields.ApplicationArgs {
(*z).ApplicationCallTxnFields.ApplicationArgs[zb0002], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApplicationArgs[zb0002])
@@ -5392,24 +5643,24 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0007 > 0 {
zb0007--
- var zb0013 int
- var zb0014 bool
- zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0014 int
+ var zb0015 bool
+ zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0013 > encodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(encodedMaxAccounts))
+ if zb0014 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0014), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "struct-from-array", "Accounts")
return
}
- if zb0014 {
+ if zb0015 {
(*z).ApplicationCallTxnFields.Accounts = nil
- } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0013 {
- (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0013]
+ } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0014 {
+ (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0014]
} else {
- (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0013)
+ (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0014)
}
for zb0003 := range (*z).ApplicationCallTxnFields.Accounts {
bts, err = (*z).ApplicationCallTxnFields.Accounts[zb0003].UnmarshalMsg(bts)
@@ -5421,24 +5672,24 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0007 > 0 {
zb0007--
- var zb0015 int
- var zb0016 bool
- zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0016 int
+ var zb0017 bool
+ zb0016, zb0017, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0015 > encodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0015), uint64(encodedMaxForeignApps))
+ if zb0016 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0016), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "struct-from-array", "ForeignApps")
return
}
- if zb0016 {
+ if zb0017 {
(*z).ApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0015 {
- (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0015]
+ } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0016 {
+ (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0016]
} else {
- (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0015)
+ (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0016)
}
for zb0004 := range (*z).ApplicationCallTxnFields.ForeignApps {
bts, err = (*z).ApplicationCallTxnFields.ForeignApps[zb0004].UnmarshalMsg(bts)
@@ -5450,53 +5701,63 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0007 > 0 {
zb0007--
- var zb0017 int
- var zb0018 bool
- zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0018 int
+ var zb0019 bool
+ zb0018, zb0019, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Boxes")
return
}
- if zb0017 > encodedMaxBoxes {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(encodedMaxBoxes))
+ if zb0018 > encodedMaxBoxes {
+ err = msgp.ErrOverflow(uint64(zb0018), uint64(encodedMaxBoxes))
err = msgp.WrapError(err, "struct-from-array", "Boxes")
return
}
- if zb0018 {
+ if zb0019 {
(*z).ApplicationCallTxnFields.Boxes = nil
- } else if (*z).ApplicationCallTxnFields.Boxes != nil && cap((*z).ApplicationCallTxnFields.Boxes) >= zb0017 {
- (*z).ApplicationCallTxnFields.Boxes = ((*z).ApplicationCallTxnFields.Boxes)[:zb0017]
+ } else if (*z).ApplicationCallTxnFields.Boxes != nil && cap((*z).ApplicationCallTxnFields.Boxes) >= zb0018 {
+ (*z).ApplicationCallTxnFields.Boxes = ((*z).ApplicationCallTxnFields.Boxes)[:zb0018]
} else {
- (*z).ApplicationCallTxnFields.Boxes = make([]BoxRef, zb0017)
+ (*z).ApplicationCallTxnFields.Boxes = make([]BoxRef, zb0018)
}
for zb0005 := range (*z).ApplicationCallTxnFields.Boxes {
- var zb0019 int
- var zb0020 bool
- zb0019, zb0020, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0020 int
+ var zb0021 bool
+ zb0020, zb0021, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0020, zb0021, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005)
return
}
- if zb0019 > 0 {
- zb0019--
+ if zb0020 > 0 {
+ zb0020--
(*z).ApplicationCallTxnFields.Boxes[zb0005].Index, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "struct-from-array", "Index")
return
}
}
- if zb0019 > 0 {
- zb0019--
+ if zb0020 > 0 {
+ zb0020--
+ var zb0022 int
+ zb0022, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "struct-from-array", "Name")
+ return
+ }
+ if zb0022 > config.MaxBytesKeyValueLen {
+ err = msgp.ErrOverflow(uint64(zb0022), uint64(config.MaxBytesKeyValueLen))
+ return
+ }
(*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "struct-from-array", "Name")
return
}
}
- if zb0019 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0019)
+ if zb0020 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0020)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "struct-from-array")
return
@@ -5507,11 +5768,11 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005)
return
}
- if zb0020 {
+ if zb0021 {
(*z).ApplicationCallTxnFields.Boxes[zb0005] = BoxRef{}
}
- for zb0019 > 0 {
- zb0019--
+ for zb0020 > 0 {
+ zb0020--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005)
@@ -5525,6 +5786,16 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "n":
+ var zb0023 int
+ zb0023, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "Name")
+ return
+ }
+ if zb0023 > config.MaxBytesKeyValueLen {
+ err = msgp.ErrOverflow(uint64(zb0023), uint64(config.MaxBytesKeyValueLen))
+ return
+ }
(*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Boxes", zb0005, "Name")
@@ -5543,24 +5814,24 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0007 > 0 {
zb0007--
- var zb0021 int
- var zb0022 bool
- zb0021, zb0022, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0024 int
+ var zb0025 bool
+ zb0024, zb0025, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0021 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(encodedMaxForeignAssets))
+ if zb0024 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0024), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "struct-from-array", "ForeignAssets")
return
}
- if zb0022 {
+ if zb0025 {
(*z).ApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0021 {
- (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0021]
+ } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0024 {
+ (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0024]
} else {
- (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0021)
+ (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0024)
}
for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets {
bts, err = (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].UnmarshalMsg(bts)
@@ -5588,14 +5859,14 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0007 > 0 {
zb0007--
- var zb0023 int
- zb0023, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0026 int
+ zb0026, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ApprovalProgram")
return
}
- if zb0023 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(config.MaxAvailableAppProgramLen))
+ if zb0026 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0026), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApprovalProgram)
@@ -5606,14 +5877,14 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0007 > 0 {
zb0007--
- var zb0024 int
- zb0024, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0027 int
+ zb0027, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "ClearStateProgram")
return
}
- if zb0024 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0024), uint64(config.MaxAvailableAppProgramLen))
+ if zb0027 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0027), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ClearStateProgram)
@@ -5708,14 +5979,14 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "note":
- var zb0025 int
- zb0025, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0028 int
+ zb0028, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "Note")
return
}
- if zb0025 > config.MaxTxnNoteBytes {
- err = msgp.ErrOverflow(uint64(zb0025), uint64(config.MaxTxnNoteBytes))
+ if zb0028 > config.MaxTxnNoteBytes {
+ err = msgp.ErrOverflow(uint64(zb0028), uint64(config.MaxTxnNoteBytes))
return
}
(*z).Header.Note, bts, err = msgp.ReadBytesBytes(bts, (*z).Header.Note)
@@ -5724,6 +5995,16 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "gen":
+ var zb0029 int
+ zb0029, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "GenesisID")
+ return
+ }
+ if zb0029 > config.MaxGenesisIDLen {
+ err = msgp.ErrOverflow(uint64(zb0029), uint64(config.MaxGenesisIDLen))
+ return
+ }
(*z).Header.GenesisID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "GenesisID")
@@ -5881,33 +6162,33 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
case "apan":
{
- var zb0026 uint64
- zb0026, bts, err = msgp.ReadUint64Bytes(bts)
+ var zb0030 uint64
+ zb0030, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "OnCompletion")
return
}
- (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0026)
+ (*z).ApplicationCallTxnFields.OnCompletion = OnCompletion(zb0030)
}
case "apaa":
- var zb0027 int
- var zb0028 bool
- zb0027, zb0028, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0031 int
+ var zb0032 bool
+ zb0031, zb0032, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0027 > encodedMaxApplicationArgs {
- err = msgp.ErrOverflow(uint64(zb0027), uint64(encodedMaxApplicationArgs))
+ if zb0031 > encodedMaxApplicationArgs {
+ err = msgp.ErrOverflow(uint64(zb0031), uint64(encodedMaxApplicationArgs))
err = msgp.WrapError(err, "ApplicationArgs")
return
}
- if zb0028 {
+ if zb0032 {
(*z).ApplicationCallTxnFields.ApplicationArgs = nil
- } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0027 {
- (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0027]
+ } else if (*z).ApplicationCallTxnFields.ApplicationArgs != nil && cap((*z).ApplicationCallTxnFields.ApplicationArgs) >= zb0031 {
+ (*z).ApplicationCallTxnFields.ApplicationArgs = ((*z).ApplicationCallTxnFields.ApplicationArgs)[:zb0031]
} else {
- (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0027)
+ (*z).ApplicationCallTxnFields.ApplicationArgs = make([][]byte, zb0031)
}
for zb0002 := range (*z).ApplicationCallTxnFields.ApplicationArgs {
(*z).ApplicationCallTxnFields.ApplicationArgs[zb0002], bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApplicationArgs[zb0002])
@@ -5917,24 +6198,24 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
case "apat":
- var zb0029 int
- var zb0030 bool
- zb0029, zb0030, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0033 int
+ var zb0034 bool
+ zb0033, zb0034, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0029 > encodedMaxAccounts {
- err = msgp.ErrOverflow(uint64(zb0029), uint64(encodedMaxAccounts))
+ if zb0033 > encodedMaxAccounts {
+ err = msgp.ErrOverflow(uint64(zb0033), uint64(encodedMaxAccounts))
err = msgp.WrapError(err, "Accounts")
return
}
- if zb0030 {
+ if zb0034 {
(*z).ApplicationCallTxnFields.Accounts = nil
- } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0029 {
- (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0029]
+ } else if (*z).ApplicationCallTxnFields.Accounts != nil && cap((*z).ApplicationCallTxnFields.Accounts) >= zb0033 {
+ (*z).ApplicationCallTxnFields.Accounts = ((*z).ApplicationCallTxnFields.Accounts)[:zb0033]
} else {
- (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0029)
+ (*z).ApplicationCallTxnFields.Accounts = make([]basics.Address, zb0033)
}
for zb0003 := range (*z).ApplicationCallTxnFields.Accounts {
bts, err = (*z).ApplicationCallTxnFields.Accounts[zb0003].UnmarshalMsg(bts)
@@ -5944,24 +6225,24 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
case "apfa":
- var zb0031 int
- var zb0032 bool
- zb0031, zb0032, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0035 int
+ var zb0036 bool
+ zb0035, zb0036, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0031 > encodedMaxForeignApps {
- err = msgp.ErrOverflow(uint64(zb0031), uint64(encodedMaxForeignApps))
+ if zb0035 > encodedMaxForeignApps {
+ err = msgp.ErrOverflow(uint64(zb0035), uint64(encodedMaxForeignApps))
err = msgp.WrapError(err, "ForeignApps")
return
}
- if zb0032 {
+ if zb0036 {
(*z).ApplicationCallTxnFields.ForeignApps = nil
- } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0031 {
- (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0031]
+ } else if (*z).ApplicationCallTxnFields.ForeignApps != nil && cap((*z).ApplicationCallTxnFields.ForeignApps) >= zb0035 {
+ (*z).ApplicationCallTxnFields.ForeignApps = ((*z).ApplicationCallTxnFields.ForeignApps)[:zb0035]
} else {
- (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0031)
+ (*z).ApplicationCallTxnFields.ForeignApps = make([]basics.AppIndex, zb0035)
}
for zb0004 := range (*z).ApplicationCallTxnFields.ForeignApps {
bts, err = (*z).ApplicationCallTxnFields.ForeignApps[zb0004].UnmarshalMsg(bts)
@@ -5971,53 +6252,63 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
case "apbx":
- var zb0033 int
- var zb0034 bool
- zb0033, zb0034, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0037 int
+ var zb0038 bool
+ zb0037, zb0038, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Boxes")
return
}
- if zb0033 > encodedMaxBoxes {
- err = msgp.ErrOverflow(uint64(zb0033), uint64(encodedMaxBoxes))
+ if zb0037 > encodedMaxBoxes {
+ err = msgp.ErrOverflow(uint64(zb0037), uint64(encodedMaxBoxes))
err = msgp.WrapError(err, "Boxes")
return
}
- if zb0034 {
+ if zb0038 {
(*z).ApplicationCallTxnFields.Boxes = nil
- } else if (*z).ApplicationCallTxnFields.Boxes != nil && cap((*z).ApplicationCallTxnFields.Boxes) >= zb0033 {
- (*z).ApplicationCallTxnFields.Boxes = ((*z).ApplicationCallTxnFields.Boxes)[:zb0033]
+ } else if (*z).ApplicationCallTxnFields.Boxes != nil && cap((*z).ApplicationCallTxnFields.Boxes) >= zb0037 {
+ (*z).ApplicationCallTxnFields.Boxes = ((*z).ApplicationCallTxnFields.Boxes)[:zb0037]
} else {
- (*z).ApplicationCallTxnFields.Boxes = make([]BoxRef, zb0033)
+ (*z).ApplicationCallTxnFields.Boxes = make([]BoxRef, zb0037)
}
for zb0005 := range (*z).ApplicationCallTxnFields.Boxes {
- var zb0035 int
- var zb0036 bool
- zb0035, zb0036, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0039 int
+ var zb0040 bool
+ zb0039, zb0040, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0035, zb0036, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0039, zb0040, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0005)
return
}
- if zb0035 > 0 {
- zb0035--
+ if zb0039 > 0 {
+ zb0039--
(*z).ApplicationCallTxnFields.Boxes[zb0005].Index, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0005, "struct-from-array", "Index")
return
}
}
- if zb0035 > 0 {
- zb0035--
+ if zb0039 > 0 {
+ zb0039--
+ var zb0041 int
+ zb0041, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "struct-from-array", "Name")
+ return
+ }
+ if zb0041 > config.MaxBytesKeyValueLen {
+ err = msgp.ErrOverflow(uint64(zb0041), uint64(config.MaxBytesKeyValueLen))
+ return
+ }
(*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0005, "struct-from-array", "Name")
return
}
}
- if zb0035 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0035)
+ if zb0039 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0039)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0005, "struct-from-array")
return
@@ -6028,11 +6319,11 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "Boxes", zb0005)
return
}
- if zb0036 {
+ if zb0040 {
(*z).ApplicationCallTxnFields.Boxes[zb0005] = BoxRef{}
}
- for zb0035 > 0 {
- zb0035--
+ for zb0039 > 0 {
+ zb0039--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0005)
@@ -6046,6 +6337,16 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "n":
+ var zb0042 int
+ zb0042, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Boxes", zb0005, "Name")
+ return
+ }
+ if zb0042 > config.MaxBytesKeyValueLen {
+ err = msgp.ErrOverflow(uint64(zb0042), uint64(config.MaxBytesKeyValueLen))
+ return
+ }
(*z).ApplicationCallTxnFields.Boxes[zb0005].Name, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.Boxes[zb0005].Name)
if err != nil {
err = msgp.WrapError(err, "Boxes", zb0005, "Name")
@@ -6062,24 +6363,24 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
case "apas":
- var zb0037 int
- var zb0038 bool
- zb0037, zb0038, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0043 int
+ var zb0044 bool
+ zb0043, zb0044, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0037 > encodedMaxForeignAssets {
- err = msgp.ErrOverflow(uint64(zb0037), uint64(encodedMaxForeignAssets))
+ if zb0043 > encodedMaxForeignAssets {
+ err = msgp.ErrOverflow(uint64(zb0043), uint64(encodedMaxForeignAssets))
err = msgp.WrapError(err, "ForeignAssets")
return
}
- if zb0038 {
+ if zb0044 {
(*z).ApplicationCallTxnFields.ForeignAssets = nil
- } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0037 {
- (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0037]
+ } else if (*z).ApplicationCallTxnFields.ForeignAssets != nil && cap((*z).ApplicationCallTxnFields.ForeignAssets) >= zb0043 {
+ (*z).ApplicationCallTxnFields.ForeignAssets = ((*z).ApplicationCallTxnFields.ForeignAssets)[:zb0043]
} else {
- (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0037)
+ (*z).ApplicationCallTxnFields.ForeignAssets = make([]basics.AssetIndex, zb0043)
}
for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets {
bts, err = (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].UnmarshalMsg(bts)
@@ -6101,14 +6402,14 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "apap":
- var zb0039 int
- zb0039, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0045 int
+ zb0045, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ApprovalProgram")
return
}
- if zb0039 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0039), uint64(config.MaxAvailableAppProgramLen))
+ if zb0045 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0045), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ApprovalProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ApprovalProgram)
@@ -6117,14 +6418,14 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "apsu":
- var zb0040 int
- zb0040, err = msgp.ReadBytesBytesHeader(bts)
+ var zb0046 int
+ zb0046, err = msgp.ReadBytesBytesHeader(bts)
if err != nil {
err = msgp.WrapError(err, "ClearStateProgram")
return
}
- if zb0040 > config.MaxAvailableAppProgramLen {
- err = msgp.ErrOverflow(uint64(zb0040), uint64(config.MaxAvailableAppProgramLen))
+ if zb0046 > config.MaxAvailableAppProgramLen {
+ err = msgp.ErrOverflow(uint64(zb0046), uint64(config.MaxAvailableAppProgramLen))
return
}
(*z).ApplicationCallTxnFields.ClearStateProgram, bts, err = msgp.ReadBytesBytes(bts, (*z).ApplicationCallTxnFields.ClearStateProgram)
@@ -6205,6 +6506,29 @@ func (z *Transaction) MsgIsZero() bool {
return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.StateProofPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.Boxes) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && 
(len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).StateProofTxnFields.StateProofType.MsgIsZero()) && ((*z).StateProofTxnFields.StateProof.MsgIsZero()) && ((*z).StateProofTxnFields.Message.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func TransactionMaxSize() (s int) {
+ s = 3 + 5 + protocol.TxTypeMaxSize() + 4 + basics.AddressMaxSize() + 4 + basics.MicroAlgosMaxSize() + 3 + basics.RoundMaxSize() + 3 + basics.RoundMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxTxnNoteBytes + 4 + msgp.StringPrefixSize + config.MaxGenesisIDLen + 3 + crypto.DigestMaxSize() + 4 + crypto.DigestMaxSize() + 3
+ // Calculating size of array: z.Header.Lease
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 6 + basics.AddressMaxSize() + 8 + crypto.OneTimeSignatureVerifierMaxSize() + 7 + crypto.VRFVerifierMaxSize() + 8 + merklesignature.CommitmentMaxSize() + 8 + basics.RoundMaxSize() + 8 + basics.RoundMaxSize() + 7 + msgp.Uint64Size + 8 + msgp.BoolSize + 4 + basics.AddressMaxSize() + 4 + basics.MicroAlgosMaxSize() + 6 + basics.AddressMaxSize() + 5 + basics.AssetIndexMaxSize() + 5 + basics.AssetParamsMaxSize() + 5 + basics.AssetIndexMaxSize() + 5 + msgp.Uint64Size + 5 + basics.AddressMaxSize() + 5 + basics.AddressMaxSize() + 7 + basics.AddressMaxSize() + 5 + basics.AddressMaxSize() + 5 + basics.AssetIndexMaxSize() + 5 + msgp.BoolSize + 5 + basics.AppIndexMaxSize() + 5 + msgp.Uint64Size + 5
+ // Calculating size of slice: z.ApplicationCallTxnFields.ApplicationArgs
+ s += msgp.ArrayHeaderSize + config.MaxAppTotalArgLen + 5
+ // Calculating size of slice: z.ApplicationCallTxnFields.Accounts
+ s += msgp.ArrayHeaderSize + ((encodedMaxAccounts) * (basics.AddressMaxSize()))
+ s += 5
+ // Calculating size of slice: z.ApplicationCallTxnFields.ForeignApps
+ s += msgp.ArrayHeaderSize + ((encodedMaxForeignApps) * (basics.AppIndexMaxSize()))
+ s += 5
+ // Calculating size of slice: z.ApplicationCallTxnFields.Boxes
+ s += msgp.ArrayHeaderSize + ((encodedMaxBoxes) * (BoxRefMaxSize()))
+ s += 5
+ // Calculating size of slice: z.ApplicationCallTxnFields.ForeignAssets
+ s += msgp.ArrayHeaderSize + ((encodedMaxForeignAssets) * (basics.AssetIndexMaxSize()))
+ s += 5 + basics.StateSchemaMaxSize() + 5 + basics.StateSchemaMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.Uint32Size + 7 + protocol.StateProofTypeMaxSize() + 3 + stateproof.StateProofMaxSize() + 6 + stateproofmsg.MessageMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *TxGroup) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -6363,6 +6687,14 @@ func (z *TxGroup) MsgIsZero() bool {
return (len((*z).TxGroupHashes) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func TxGroupMaxSize() (s int) {
+ s = 1 + 7
+ // Calculating size of slice: z.TxGroupHashes
+ s += msgp.ArrayHeaderSize + ((config.MaxTxGroupSize) * (crypto.DigestMaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *Txid) MarshalMsg(b []byte) []byte {
return ((*(crypto.Digest))(z)).MarshalMsg(b)
@@ -6390,3 +6722,8 @@ func (z *Txid) Msgsize() int {
func (z *Txid) MsgIsZero() bool {
return ((*(crypto.Digest))(z)).MsgIsZero()
}
+
+// MaxSize returns a maximum valid message size for this message type
+func TxidMaxSize() int {
+ return crypto.DigestMaxSize()
+}
diff --git a/data/transactions/sort.go b/data/transactions/sort.go
index 3ca8e4a56..d86d6e293 100644
--- a/data/transactions/sort.go
+++ b/data/transactions/sort.go
@@ -18,6 +18,7 @@ package transactions
// SortUint64 implements sorting by uint64 keys for
// canonical encoding of maps in msgpack format.
+//
//msgp:ignore SortUint64
//msgp:sort uint64 SortUint64
type SortUint64 []uint64
@@ -28,6 +29,7 @@ func (a SortUint64) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// SortString implements sorting by string keys for
// canonical encoding of maps in msgpack format.
+//
//msgp:ignore SortString
//msgp:sort string SortString
type SortString []string
diff --git a/data/transactions/stateproof.go b/data/transactions/stateproof.go
index 4bc7c9a24..89b14de75 100644
--- a/data/transactions/stateproof.go
+++ b/data/transactions/stateproof.go
@@ -41,8 +41,9 @@ func (sp StateProofTxnFields) Empty() bool {
sp.Message.MsgIsZero()
}
-//msgp:ignore specialAddr
// specialAddr is used to form a unique address that will send out state proofs.
+//
+//msgp:ignore specialAddr
type specialAddr string
// ToBeHashed implements the crypto.Hashable interface
diff --git a/data/transactions/teal.go b/data/transactions/teal.go
index 37388a0d9..6d09ab314 100644
--- a/data/transactions/teal.go
+++ b/data/transactions/teal.go
@@ -21,6 +21,8 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
)
// EvalDelta stores StateDeltas for an application's global key/value store, as
@@ -40,7 +42,9 @@ type EvalDelta struct {
// can refer to it.
SharedAccts []basics.Address `codec:"sa,allocbound=config.MaxEvalDeltaAccounts"`
- Logs []string `codec:"lg,allocbound=config.MaxLogCalls"`
+ // The total allocbound calculation here accounts for the worst possible case of having config.MaxLogCalls individual log entries
+ // with the length of all of them summing up to config.MaxEvalDeltaTotalLogSize which is the limit for the sum of individual log lengths
+ Logs []string `codec:"lg,allocbound=config.MaxLogCalls,maxtotalbytes=(config.MaxLogCalls*msgp.StringPrefixSize) + config.MaxEvalDeltaTotalLogSize"`
InnerTxns []SignedTxnWithAD `codec:"itx,allocbound=config.MaxInnerTransactionsPerDelta"`
}
@@ -50,39 +54,19 @@ type EvalDelta struct {
// because the msgpack codec will encode/decode an empty map as nil, and we want
// an empty generated EvalDelta to equal an empty one we decode off the wire.
func (ed EvalDelta) Equal(o EvalDelta) bool {
- // LocalDeltas length should be the same
- if len(ed.LocalDeltas) != len(o.LocalDeltas) {
+ if !maps.EqualFunc(ed.LocalDeltas, o.LocalDeltas, maps.Equal[basics.StateDelta, basics.StateDelta]) {
return false
}
- // All keys and local StateDeltas should be the same
- for k, v := range ed.LocalDeltas {
- // Other LocalDelta must have value for key
- ov, ok := o.LocalDeltas[k]
- if !ok {
- return false
- }
-
- // Other LocalDelta must have same value for key
- if !ov.Equal(v) {
- return false
- }
- }
-
// GlobalDeltas must be equal
if !ed.GlobalDelta.Equal(o.GlobalDelta) {
return false
}
// Logs must be equal
- if len(ed.Logs) != len(o.Logs) {
+ if !slices.Equal(ed.Logs, o.Logs) {
return false
}
- for i, l := range ed.Logs {
- if l != o.Logs[i] {
- return false
- }
- }
// InnerTxns must be equal
if len(ed.InnerTxns) != len(o.InnerTxns) {
diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go
index 5a397dc7c..c3da26ca5 100644
--- a/data/transactions/transaction.go
+++ b/data/transactions/transaction.go
@@ -26,6 +26,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
+ "golang.org/x/exp/slices"
)
// Txid is a hash used to uniquely identify individual transactions
@@ -58,7 +59,7 @@ type Header struct {
FirstValid basics.Round `codec:"fv"`
LastValid basics.Round `codec:"lv"`
Note []byte `codec:"note,allocbound=config.MaxTxnNoteBytes"` // Uniqueness or app-level data about txn
- GenesisID string `codec:"gen"`
+ GenesisID string `codec:"gen,allocbound=config.MaxGenesisIDLen"`
GenesisHash crypto.Digest `codec:"gh"`
// Group specifies that this transaction is part of a
@@ -272,12 +273,7 @@ func (tx Header) Alive(tc TxnContext) error {
// MatchAddress checks if the transaction touches a given address.
func (tx Transaction) MatchAddress(addr basics.Address, spec SpecialAddresses) bool {
- for _, candidate := range tx.RelevantAddrs(spec) {
- if addr == candidate {
- return true
- }
- }
- return false
+ return slices.Contains(tx.RelevantAddrs(spec), addr)
}
var errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound = errors.New("transaction first voting round need to be less than its last voting round")
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index 528d5ea5e..8667d81e8 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -158,16 +158,17 @@ func (g *GroupContext) Equal(other *GroupContext) bool {
// txnBatchPrep verifies a SignedTxn having no obviously inconsistent data.
// Block-assembly time checks of LogicSig and accounting rules may still block the txn.
// It is the caller responsibility to call batchVerifier.Verify().
-func txnBatchPrep(s *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, verifier *crypto.BatchVerifier, evalTracer logic.EvalTracer) *TxGroupError {
+func txnBatchPrep(gi int, groupCtx *GroupContext, verifier *crypto.BatchVerifier, evalTracer logic.EvalTracer) *TxGroupError {
+ s := &groupCtx.signedGroupTxns[gi]
if !groupCtx.consensusParams.SupportRekeying && (s.AuthAddr != basics.Address{}) {
- return &TxGroupError{err: errRekeyingNotSupported, GroupIndex: groupIndex, Reason: TxGroupErrorReasonGeneric}
+ return &TxGroupError{err: errRekeyingNotSupported, GroupIndex: gi, Reason: TxGroupErrorReasonGeneric}
}
if err := s.Txn.WellFormed(groupCtx.specAddrs, groupCtx.consensusParams); err != nil {
- return &TxGroupError{err: err, GroupIndex: groupIndex, Reason: TxGroupErrorReasonNotWellFormed}
+ return &TxGroupError{err: err, GroupIndex: gi, Reason: TxGroupErrorReasonNotWellFormed}
}
- return stxnCoreChecks(s, groupIndex, groupCtx, verifier, evalTracer)
+ return stxnCoreChecks(gi, groupCtx, verifier, evalTracer)
}
// TxnGroup verifies a []SignedTxn as being signed and having no obviously inconsistent data.
@@ -209,7 +210,7 @@ func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr *bookkeeping.Bl
minFeeCount := uint64(0)
feesPaid := uint64(0)
for i, stxn := range stxs {
- prepErr := txnBatchPrep(&stxs[i], i, groupCtx, verifier, evalTracer)
+ prepErr := txnBatchPrep(i, groupCtx, verifier, evalTracer)
if prepErr != nil {
// re-wrap the error with more details
prepErr.err = fmt.Errorf("transaction %+v invalid : %w", stxn, prepErr.err)
@@ -252,7 +253,7 @@ const stateProofTxn sigOrTxnType = 4
// checkTxnSigTypeCounts checks the number of signature types and reports an error in case of a violation
func checkTxnSigTypeCounts(s *transactions.SignedTxn, groupIndex int) (sigType sigOrTxnType, err *TxGroupError) {
numSigCategories := 0
- if s.Sig != (crypto.Signature{}) {
+ if !s.Sig.Blank() {
numSigCategories++
sigType = regularSig
}
@@ -281,8 +282,9 @@ func checkTxnSigTypeCounts(s *transactions.SignedTxn, groupIndex int) (sigType s
}
// stxnCoreChecks runs signatures validity checks and enqueues signature into batchVerifier for verification.
-func stxnCoreChecks(s *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier, evalTracer logic.EvalTracer) *TxGroupError {
- sigType, err := checkTxnSigTypeCounts(s, groupIndex)
+func stxnCoreChecks(gi int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier, evalTracer logic.EvalTracer) *TxGroupError {
+ s := &groupCtx.signedGroupTxns[gi]
+ sigType, err := checkTxnSigTypeCounts(s, gi)
if err != nil {
return err
}
@@ -293,17 +295,12 @@ func stxnCoreChecks(s *transactions.SignedTxn, groupIndex int, groupCtx *GroupCo
return nil
case multiSig:
if err := crypto.MultisigBatchPrep(s.Txn, crypto.Digest(s.Authorizer()), s.Msig, batchVerifier); err != nil {
- return &TxGroupError{err: fmt.Errorf("multisig validation failed: %w", err), GroupIndex: groupIndex, Reason: TxGroupErrorReasonMsigNotWellFormed}
+ return &TxGroupError{err: fmt.Errorf("multisig validation failed: %w", err), GroupIndex: gi, Reason: TxGroupErrorReasonMsigNotWellFormed}
}
- counter := 0
- for _, subsigi := range s.Msig.Subsigs {
- if (subsigi.Sig != crypto.Signature{}) {
- counter++
- }
- }
- if counter <= 4 {
+ sigs := s.Msig.Signatures()
+ if sigs <= 4 {
msigLessOrEqual4.Inc(nil)
- } else if counter <= 10 {
+ } else if sigs <= 10 {
msigLessOrEqual10.Inc(nil)
} else {
msigMore10.Inc(nil)
@@ -311,8 +308,8 @@ func stxnCoreChecks(s *transactions.SignedTxn, groupIndex int, groupCtx *GroupCo
return nil
case logicSig:
- if err := logicSigVerify(s, groupIndex, groupCtx, evalTracer); err != nil {
- return &TxGroupError{err: err, GroupIndex: groupIndex, Reason: TxGroupErrorReasonLogicSigFailed}
+ if err := logicSigVerify(gi, groupCtx, evalTracer); err != nil {
+ return &TxGroupError{err: err, GroupIndex: gi, Reason: TxGroupErrorReasonLogicSigFailed}
}
return nil
@@ -320,16 +317,16 @@ func stxnCoreChecks(s *transactions.SignedTxn, groupIndex int, groupCtx *GroupCo
return nil
default:
- return &TxGroupError{err: errUnknownSignature, GroupIndex: groupIndex, Reason: TxGroupErrorReasonGeneric}
+ return &TxGroupError{err: errUnknownSignature, GroupIndex: gi, Reason: TxGroupErrorReasonGeneric}
}
}
// LogicSigSanityCheck checks that the signature is valid and that the program is basically well formed.
// It does not evaluate the logic.
-func LogicSigSanityCheck(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext) error {
+func LogicSigSanityCheck(gi int, groupCtx *GroupContext) error {
batchVerifier := crypto.MakeBatchVerifier()
- if err := logicSigSanityCheckBatchPrep(txn, groupIndex, groupCtx, batchVerifier); err != nil {
+ if err := logicSigSanityCheckBatchPrep(gi, groupCtx, batchVerifier); err != nil {
return err
}
return batchVerifier.Verify()
@@ -338,12 +335,17 @@ func LogicSigSanityCheck(txn *transactions.SignedTxn, groupIndex int, groupCtx *
// logicSigSanityCheckBatchPrep checks that the signature is valid and that the program is basically well formed.
// It does not evaluate the logic.
// it is the caller responsibility to call batchVerifier.Verify()
-func logicSigSanityCheckBatchPrep(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier) error {
- lsig := txn.Lsig
-
+func logicSigSanityCheckBatchPrep(gi int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier) error {
if groupCtx.consensusParams.LogicSigVersion == 0 {
return errors.New("LogicSig not enabled")
}
+
+ if gi < 0 {
+ return errors.New("negative group index")
+ }
+ txn := &groupCtx.signedGroupTxns[gi]
+ lsig := txn.Lsig
+
if len(lsig.Logic) == 0 {
return errors.New("LogicSig.Logic empty")
}
@@ -358,9 +360,6 @@ func logicSigSanityCheckBatchPrep(txn *transactions.SignedTxn, groupIndex int, g
return errors.New("LogicSig.Logic too long")
}
- if groupIndex < 0 {
- return errors.New("negative groupIndex")
- }
txngroup := transactions.WrapSignedTxnsWithAD(groupCtx.signedGroupTxns)
ep := logic.EvalParams{
Proto: &groupCtx.consensusParams,
@@ -368,14 +367,14 @@ func logicSigSanityCheckBatchPrep(txn *transactions.SignedTxn, groupIndex int, g
MinAvmVersion: &groupCtx.minAvmVersion,
SigLedger: groupCtx.ledger, // won't be needed for CheckSignature
}
- err := logic.CheckSignature(groupIndex, &ep)
+ err := logic.CheckSignature(gi, &ep)
if err != nil {
return err
}
hasMsig := false
numSigs := 0
- if lsig.Sig != (crypto.Signature{}) {
+ if !lsig.Sig.Blank() {
numSigs++
}
if !lsig.Msig.Blank() {
@@ -403,15 +402,10 @@ func logicSigSanityCheckBatchPrep(txn *transactions.SignedTxn, groupIndex int, g
if err := crypto.MultisigBatchPrep(&program, crypto.Digest(txn.Authorizer()), lsig.Msig, batchVerifier); err != nil {
return fmt.Errorf("logic multisig validation failed: %w", err)
}
- counter := 0
- for _, subsigi := range lsig.Msig.Subsigs {
- if (subsigi.Sig != crypto.Signature{}) {
- counter++
- }
- }
- if counter <= 4 {
+ sigs := lsig.Msig.Signatures()
+ if sigs <= 4 {
msigLsigLessOrEqual4.Inc(nil)
- } else if counter <= 10 {
+ } else if sigs <= 10 {
msigLsigLessOrEqual10.Inc(nil)
} else {
msigLsigMore10.Inc(nil)
@@ -421,15 +415,12 @@ func logicSigSanityCheckBatchPrep(txn *transactions.SignedTxn, groupIndex int, g
}
// logicSigVerify checks that the signature is valid, executing the program.
-func logicSigVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, evalTracer logic.EvalTracer) error {
- err := LogicSigSanityCheck(txn, groupIndex, groupCtx)
+func logicSigVerify(gi int, groupCtx *GroupContext, evalTracer logic.EvalTracer) error {
+ err := LogicSigSanityCheck(gi, groupCtx)
if err != nil {
return err
}
- if groupIndex < 0 {
- return errors.New("negative groupIndex")
- }
ep := logic.EvalParams{
Proto: &groupCtx.consensusParams,
TxnGroup: transactions.WrapSignedTxnsWithAD(groupCtx.signedGroupTxns),
@@ -437,14 +428,14 @@ func logicSigVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *Group
SigLedger: groupCtx.ledger,
Tracer: evalTracer,
}
- pass, cx, err := logic.EvalSignatureFull(groupIndex, &ep)
+ pass, cx, err := logic.EvalSignatureFull(gi, &ep)
if err != nil {
logicErrTotal.Inc(nil)
- return fmt.Errorf("transaction %v: %w", txn.ID(), err)
+ return fmt.Errorf("transaction %v: %w", groupCtx.signedGroupTxns[gi].ID(), err)
}
if !pass {
logicRejTotal.Inc(nil)
- return fmt.Errorf("transaction %v: rejected by logic", txn.ID())
+ return fmt.Errorf("transaction %v: rejected by logic", groupCtx.signedGroupTxns[gi].ID())
}
logicGoodTotal.Inc(nil)
logicCostTotal.AddUint64(uint64(cx.Cost()), nil)
diff --git a/data/transactions/verify/txnBatch.go b/data/transactions/verify/txnBatch.go
index 40bd2dfff..6a45944ed 100644
--- a/data/transactions/verify/txnBatch.go
+++ b/data/transactions/verify/txnBatch.go
@@ -212,14 +212,7 @@ func getNumberOfBatchableSigsInTxn(stx *transactions.SignedTxn, groupIndex int)
case regularSig:
return 1, nil
case multiSig:
- sig := stx.Msig
- batchSigs := uint64(0)
- for _, subsigi := range sig.Subsigs {
- if (subsigi.Sig != crypto.Signature{}) {
- batchSigs++
- }
- }
- return batchSigs, nil
+ return uint64(stx.Msig.Signatures()), nil
case logicSig:
// Currently the sigs in here are not batched. Something to consider later.
return 0, nil
diff --git a/data/transactions/verify/txnBatch_test.go b/data/transactions/verify/txnBatch_test.go
index aeced948c..a7ee2df04 100644
--- a/data/transactions/verify/txnBatch_test.go
+++ b/data/transactions/verify/txnBatch_test.go
@@ -690,7 +690,7 @@ func TestStreamToBatchCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest /
// cancel the ctx as the sig is not yet sent to the exec pool
// the test might sporadically fail if between sending the txn above
// and the cancelation, 2 x waitForNextTxnDuration elapses (10ms)
- time.Sleep(12)
+ time.Sleep(12 * time.Millisecond)
go func() {
// wait a bit before releasing the tasks, so that the verificationPool ctx first gets canceled
time.Sleep(20 * time.Millisecond)
diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go
index a26b0b099..744371113 100644
--- a/data/transactions/verify/txn_test.go
+++ b/data/transactions/verify/txn_test.go
@@ -59,10 +59,10 @@ var spec = transactions.SpecialAddresses{
RewardsPool: poolAddr,
}
-func verifyTxn(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext) error {
+func verifyTxn(gi int, groupCtx *GroupContext) error {
batchVerifier := crypto.MakeBatchVerifier()
- if err := txnBatchPrep(s, txnIdx, groupCtx, batchVerifier, nil); err != nil {
+ if err := txnBatchPrep(gi, groupCtx, batchVerifier, nil); err != nil {
return err
}
return batchVerifier.Verify()
@@ -211,14 +211,15 @@ func TestSignedPayment(t *testing.T) {
groupCtx, err := PrepareGroupContext(stxns, blockHeader, nil)
require.NoError(t, err)
require.NoError(t, payment.WellFormed(spec, proto), "generateTestObjects generated an invalid payment")
- require.NoError(t, verifyTxn(&stxn, 0, groupCtx), "generateTestObjects generated a bad signedtxn")
+ require.NoError(t, verifyTxn(0, groupCtx), "generateTestObjects generated a bad signedtxn")
stxn2 := payment.Sign(secret)
require.Equal(t, stxn2.Sig, stxn.Sig, "got two different signatures for the same transaction (our signing function is deterministic)")
stxn2.MessUpSigForTesting()
require.Equal(t, stxn.ID(), stxn2.ID(), "changing sig caused txid to change")
- require.Error(t, verifyTxn(&stxn2, 0, groupCtx), "verify succeeded with bad sig")
+ groupCtx.signedGroupTxns[0] = stxn2
+ require.Error(t, verifyTxn(0, groupCtx), "verify succeeded with bad sig")
require.True(t, crypto.SignatureVerifier(addr).Verify(payment, stxn.Sig), "signature on the transaction is not the signature of the hash of the transaction under the spender's key")
}
@@ -231,15 +232,13 @@ func TestTxnValidationEncodeDecode(t *testing.T) {
for _, txn := range signed {
groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{txn}, blockHeader, nil)
require.NoError(t, err)
- if verifyTxn(&txn, 0, groupCtx) != nil {
+ if verifyTxn(0, groupCtx) != nil {
t.Errorf("signed transaction %#v did not verify", txn)
}
x := protocol.Encode(&txn)
- var signedTx transactions.SignedTxn
- protocol.Decode(x, &signedTx)
-
- if verifyTxn(&signedTx, 0, groupCtx) != nil {
+ protocol.Decode(x, &groupCtx.signedGroupTxns[0])
+ if verifyTxn(0, groupCtx) != nil {
t.Errorf("signed transaction %#v did not verify", txn)
}
}
@@ -253,14 +252,14 @@ func TestTxnValidationEmptySig(t *testing.T) {
for _, txn := range signed {
groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{txn}, blockHeader, nil)
require.NoError(t, err)
- if verifyTxn(&txn, 0, groupCtx) != nil {
+ if verifyTxn(0, groupCtx) != nil {
t.Errorf("signed transaction %#v did not verify", txn)
}
- txn.Sig = crypto.Signature{}
- txn.Msig = crypto.MultisigSig{}
- txn.Lsig = transactions.LogicSig{}
- if verifyTxn(&txn, 0, groupCtx) == nil {
+ groupCtx.signedGroupTxns[0].Sig = crypto.Signature{}
+ groupCtx.signedGroupTxns[0].Msig = crypto.MultisigSig{}
+ groupCtx.signedGroupTxns[0].Lsig = transactions.LogicSig{}
+ if verifyTxn(0, groupCtx) == nil {
t.Errorf("transaction %#v verified without sig", txn)
}
}
@@ -296,13 +295,14 @@ func TestTxnValidationStateProof(t *testing.T) {
groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{stxn}, blockHeader, nil)
require.NoError(t, err)
- err = verifyTxn(&stxn, 0, groupCtx)
+ err = verifyTxn(0, groupCtx)
require.NoError(t, err, "state proof txn %#v did not verify", stxn)
stxn2 := stxn
stxn2.Txn.Type = protocol.PaymentTx
stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee}
- err = verifyTxn(&stxn2, 0, groupCtx)
+ groupCtx.signedGroupTxns[0] = stxn2
+ err = verifyTxn(0, groupCtx)
require.Error(t, err, "payment txn %#v verified from StateProofSender", stxn2)
secret := keypair()
@@ -310,28 +310,33 @@ func TestTxnValidationStateProof(t *testing.T) {
stxn2.Txn.Header.Sender = basics.Address(secret.SignatureVerifier)
stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee}
stxn2 = stxn2.Txn.Sign(secret)
- err = verifyTxn(&stxn2, 0, groupCtx)
+ groupCtx.signedGroupTxns[0] = stxn2
+ err = verifyTxn(0, groupCtx)
require.Error(t, err, "state proof txn %#v verified from non-StateProofSender", stxn2)
// state proof txns are not allowed to have non-zero values for many fields
stxn2 = stxn
stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee}
- err = verifyTxn(&stxn2, 0, groupCtx)
+ groupCtx.signedGroupTxns[0] = stxn2
+ err = verifyTxn(0, groupCtx)
require.Error(t, err, "state proof txn %#v verified", stxn2)
stxn2 = stxn
stxn2.Txn.Header.Note = []byte{'A'}
- err = verifyTxn(&stxn2, 0, groupCtx)
+ groupCtx.signedGroupTxns[0] = stxn2
+ err = verifyTxn(0, groupCtx)
require.Error(t, err, "state proof txn %#v verified", stxn2)
stxn2 = stxn
stxn2.Txn.Lease[0] = 1
- err = verifyTxn(&stxn2, 0, groupCtx)
+ groupCtx.signedGroupTxns[0] = stxn2
+ err = verifyTxn(0, groupCtx)
require.Error(t, err, "state proof txn %#v verified", stxn2)
stxn2 = stxn
stxn2.Txn.RekeyTo = basics.Address(secret.SignatureVerifier)
- err = verifyTxn(&stxn2, 0, groupCtx)
+ groupCtx.signedGroupTxns[0] = stxn2
+ err = verifyTxn(0, groupCtx)
require.Error(t, err, "state proof txn %#v verified", stxn2)
}
@@ -349,7 +354,7 @@ func TestDecodeNil(t *testing.T) {
// This used to panic when run on a zero value of SignedTxn.
groupCtx, err := PrepareGroupContext([]transactions.SignedTxn{st}, blockHeader, nil)
require.NoError(t, err)
- verifyTxn(&st, 0, groupCtx)
+ verifyTxn(0, groupCtx)
}
}
@@ -626,7 +631,7 @@ func TestPaysetGroups(t *testing.T) {
func BenchmarkPaysetGroups(b *testing.B) {
if b.N < 2000 {
- b.N = 2000
+ b.N = 2000 //nolint:staticcheck // intentionally setting b.N
}
_, signedTxn, secrets, addrs := generateTestObjects(b.N, 20, 0, 50)
blkHdr := createDummyBlockHeader()
@@ -1056,7 +1061,7 @@ func verifyGroup(t *testing.T, txnGroups [][]transactions.SignedTxn, blkHdr *boo
func BenchmarkTxn(b *testing.B) {
if b.N < 2000 {
- b.N = 2000
+ b.N = 2000 //nolint:staticcheck // intentionally setting b.N
}
_, signedTxn, secrets, addrs := generateTestObjects(b.N, 20, 0, 50)
blk := bookkeeping.Block{BlockHeader: createDummyBlockHeader()}
@@ -1066,8 +1071,8 @@ func BenchmarkTxn(b *testing.B) {
for _, txnGroup := range txnGroups {
groupCtx, err := PrepareGroupContext(txnGroup, &blk.BlockHeader, nil)
require.NoError(b, err)
- for i, txn := range txnGroup {
- err := verifyTxn(&txn, i, groupCtx)
+ for i := range txnGroup {
+ err := verifyTxn(i, groupCtx)
require.NoError(b, err)
}
}
diff --git a/data/txHandler.go b/data/txHandler.go
index 3ad271670..74f9b07b7 100644
--- a/data/txHandler.go
+++ b/data/txHandler.go
@@ -171,13 +171,11 @@ func MakeTxHandler(opts TxHandlerOpts) (*TxHandler, error) {
streamVerifierDropped: make(chan *verify.UnverifiedTxnSigJob),
}
- // use defaultBacklogSize = approx number of txns in a full block as a parameter for the dedup cache size
- defaultBacklogSize := config.GetDefaultLocal().TxBacklogSize
if opts.Config.TxFilterRawMsgEnabled() {
- handler.msgCache = makeSaltedCache(2 * defaultBacklogSize)
+ handler.msgCache = makeSaltedCache(int(opts.Config.TxIncomingFilterMaxSize))
}
if opts.Config.TxFilterCanonicalEnabled() {
- handler.txCanonicalCache = makeDigestCache(2 * defaultBacklogSize)
+ handler.txCanonicalCache = makeDigestCache(int(opts.Config.TxIncomingFilterMaxSize))
}
if opts.Config.EnableTxBacklogRateLimiting {
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 71fcc16d6..db01e6fad 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,8 +1,8 @@
-FROM ubuntu:18.04
+FROM ubuntu:22.04
ARG GOLANG_VERSION
ENV DEBIAN_FRONTEND noninteractive
-RUN apt update && apt-get install -y git libboost-all-dev wget sqlite3 autoconf sudo tzdata bsdmainutils
+RUN apt update && apt-get install -y git wget autoconf sudo tzdata bsdmainutils
WORKDIR /root
RUN wget --quiet https://dl.google.com/go/go${GOLANG_VERSION}.linux-amd64.tar.gz && tar -xvf go${GOLANG_VERSION}.linux-amd64.tar.gz && mv go /usr/local
diff --git a/docker/README.md b/docker/README.md
index a97e3296d..d218b1013 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -36,17 +36,19 @@ The following environment variables can be supplied. Except when noted, it is po
| Variable | Description |
| -------- | ----------- |
-| NETWORK | Leave blank for a private network, otherwise specify one of mainnet, betanet, testnet, or devnet. Only used during a data directory initialization. |
-| PROFILE | If set, initializes the config.json file according to the given profile. |
-| DEV_MODE | If set to 1 on a private network, enable dev mode. Only used during data directory initialization. |
-| START_KMD | When set to 1, start kmd service with no timeout. THIS SHOULD NOT BE USED IN PRODUCTION. |
-| FAST_CATCHUP | If set to 1 on a public network, attempt to start fast-catchup during initial config. |
-| TOKEN | If set, overrides the REST API token. |
-| ADMIN_TOKEN | If set, overrides the REST API admin token. |
-| KMD_TOKEN | If set along with `START_KMD`, override the KMD REST API token. |
-| TELEMETRY_NAME | If set on a public network, telemetry is reported with this name. |
-| NUM_ROUNDS | If set on a private network, override default of 30000 participation keys. |
-| PEER_ADDRESS | If set, override phonebook with peer ip:port (or semicolon separated list: ip:port;ip:port;ip:port...) |
+| NETWORK | Leave blank for a private network, otherwise specify one of mainnet, betanet, testnet, or devnet. Only used during a data directory initialization. |
+| PROFILE | If set, initializes the config.json file according to the given profile. |
+| DEV_MODE | If set to 1 on a private network, enable dev mode. Only used during data directory initialization. |
+| START_KMD | When set to 1, start kmd service with no timeout. THIS SHOULD NOT BE USED IN PRODUCTION. |
+| FAST_CATCHUP | If set to 1 on a public network, attempt to start fast-catchup during initial config. |
+| TOKEN | If set, overrides the REST API token. |
+| ADMIN_TOKEN | If set, overrides the REST API admin token. |
+| KMD_TOKEN | If set along with `START_KMD`, override the KMD REST API token. |
+| TELEMETRY_NAME | If set on a public network, telemetry is reported with this name. |
+| NUM_ROUNDS | If set on a private network, override default of 30000 participation keys. |
+| GENESIS_ADDRESS | If set, use this API address to initialize the genesis file. |
+| PEER_ADDRESS | If set, override phonebook with peer ip:port (or semicolon separated list: ip:port;ip:port;ip:port...) |
+| GOSSIP_PORT | If set, configure the node to listen for external connections on this address. For example "4161" |
### Special Files
@@ -58,7 +60,7 @@ Configuration can be modified by specifying certain files. These can be changed
| /etc/algorand/algod.token | Override default randomized REST API token. |
| /etc/algorand/algod.admin.token | Override default randomized REST API admin token. |
| /etc/algorand/logging.config | Use a custom [logging.config](https://developer.algorand.org/docs/run-a-node/reference/telemetry-config/#configuration) file for configuring telemetry. |
- | /etc/algorand/template.json | Override default private network topology. One of the nodes in the template must be named "data".|
+ | /etc/algorand/template.json | Override default private network topology. One of the nodes in the template must be named "data".|
## Example Configuration
diff --git a/docker/build/Dockerfile b/docker/build/Dockerfile
index aeec9ae07..5d096cbab 100644
--- a/docker/build/Dockerfile
+++ b/docker/build/Dockerfile
@@ -1,7 +1,7 @@
-FROM ubuntu:16.04
+FROM ubuntu:20.04
ARG GOLANG_VERSION
-RUN apt-get update && apt-get install -y git libboost-all-dev wget sqlite3 autoconf build-essential shellcheck
+RUN apt-get update && apt-get install -y git wget autoconf build-essential shellcheck
WORKDIR /root
RUN wget --quiet https://dl.google.com/go/go${GOLANG_VERSION}.linux-amd64.tar.gz && tar -xvf go${GOLANG_VERSION}.linux-amd64.tar.gz && mv go /usr/local
ENV GOROOT=/usr/local/go \
diff --git a/docker/build/Dockerfile-deploy b/docker/build/Dockerfile-deploy
index d8c4a38ee..d01b505f4 100644
--- a/docker/build/Dockerfile-deploy
+++ b/docker/build/Dockerfile-deploy
@@ -1,7 +1,7 @@
-FROM --platform=linux/amd64 ubuntu:18.04
+FROM --platform=linux/amd64 ubuntu:20.04
ARG GOLANG_VERSION
-RUN apt-get update && apt-get install -y git libboost-all-dev wget sqlite3 autoconf jq bsdmainutils shellcheck
+RUN apt-get update && apt-get install -y git wget autoconf jq bsdmainutils shellcheck
WORKDIR /root
RUN wget --quiet https://dl.google.com/go/go${GOLANG_VERSION}.linux-amd64.tar.gz && tar -xvf go${GOLANG_VERSION}.linux-amd64.tar.gz && mv go /usr/local
ENV GOROOT=/usr/local/go \
diff --git a/docker/build/aptly.Dockerfile b/docker/build/aptly.Dockerfile
index 54e5cab67..1849d8e5e 100644
--- a/docker/build/aptly.Dockerfile
+++ b/docker/build/aptly.Dockerfile
@@ -1,4 +1,4 @@
-FROM ubuntu:18.04
+FROM ubuntu:20.04
ARG ARCH=amd64
ARG GOLANG_VERSION
diff --git a/docker/build/cicd.alpine.Dockerfile b/docker/build/cicd.alpine.Dockerfile
index e9c1f9977..c3ef9698a 100644
--- a/docker/build/cicd.alpine.Dockerfile
+++ b/docker/build/cicd.alpine.Dockerfile
@@ -5,7 +5,6 @@ RUN apk update && \
apk add bash && \
apk add git && \
apk add python3 && \
- apk add boost-dev && \
apk add expect && \
apk add jq && \
apk add autoconf && \
@@ -14,8 +13,7 @@ RUN apk update && \
apk add automake && \
apk add fmt && \
apk add build-base && \
- apk add musl-dev && \
- apk add sqlite
+ apk add musl-dev
RUN apk add dpkg && \
wget http://deb.debian.org/debian/pool/main/s/shellcheck/shellcheck_0.5.0-3_armhf.deb && \
@@ -26,6 +24,7 @@ COPY . $GOPATH/src/github.com/algorand/go-algorand
WORKDIR $GOPATH/src/github.com/algorand/go-algorand
ENV GCC_CONFIG="--with-arch=armv6" \
GOPROXY=https://proxy.golang.org,https://pkg.go.dev,https://goproxy.io,direct
+RUN git config --global --add safe.directory '*'
RUN make clean
RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \
mkdir -p $GOPATH/src/github.com/algorand/go-algorand
diff --git a/docker/build/cicd.centos.Dockerfile b/docker/build/cicd.centos.Dockerfile
index e51c05f2f..f292e3d22 100644
--- a/docker/build/cicd.centos.Dockerfile
+++ b/docker/build/cicd.centos.Dockerfile
@@ -5,7 +5,7 @@ ARG GOLANG_VERSION
ARG ARCH="amd64"
RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
yum update -y && \
- yum install -y autoconf wget awscli git gnupg2 nfs-utils python3-devel sqlite3 boost-devel expect jq \
+ yum install -y autoconf wget awscli git gnupg2 nfs-utils python3-devel expect jq \
libtool gcc-c++ libstdc++-devel libstdc++-static rpmdevtools createrepo rpm-sign bzip2 which ShellCheck \
libffi-devel openssl-devel
WORKDIR /root
@@ -20,6 +20,7 @@ COPY . $GOPATH/src/github.com/algorand/go-algorand
ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \
GOPROXY=https://proxy.golang.org,https://pkg.go.dev,https://goproxy.io,direct
WORKDIR $GOPATH/src/github.com/algorand/go-algorand
+RUN git config --global --add safe.directory '*'
RUN make clean
RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \
mkdir -p $GOPATH/src/github.com/algorand/go-algorand
diff --git a/docker/build/cicd.centos8.Dockerfile b/docker/build/cicd.centos8.Dockerfile
index 3b661abf8..28ec63484 100644
--- a/docker/build/cicd.centos8.Dockerfile
+++ b/docker/build/cicd.centos8.Dockerfile
@@ -5,12 +5,11 @@ ARG GOLANG_VERSION
ARG ARCH="amd64"
RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
dnf update -y && \
- dnf install -y autoconf wget awscli git gnupg2 nfs-utils python3-devel boost-devel expect jq \
+ dnf install -y autoconf wget awscli git gnupg2 nfs-utils python3-devel expect jq \
libtool gcc-c++ libstdc++-devel rpmdevtools createrepo rpm-sign bzip2 which \
libffi-devel openssl-devel
RUN dnf install -y epel-release && \
dnf update && \
- dnf -y install sqlite && \
dnf -y --enablerepo=powertools install libstdc++-static && \
dnf -y install make
RUN echo "${BOLD}Downloading and installing binaries...${RESET}" && \
@@ -28,6 +27,7 @@ COPY . $GOPATH/src/github.com/algorand/go-algorand
ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \
GOPROXY=https://proxy.golang.org,https://pkg.go.dev,https://goproxy.io,direct
WORKDIR $GOPATH/src/github.com/algorand/go-algorand
+RUN git config --global --add safe.directory '*'
RUN make clean
RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \
mkdir -p $GOPATH/src/github.com/algorand/go-algorand
diff --git a/docker/build/cicd.ubuntu.Dockerfile b/docker/build/cicd.ubuntu.Dockerfile
index 011da3182..0b9fce77d 100644
--- a/docker/build/cicd.ubuntu.Dockerfile
+++ b/docker/build/cicd.ubuntu.Dockerfile
@@ -1,11 +1,11 @@
ARG ARCH="amd64"
-FROM ${ARCH}/ubuntu:18.04
+FROM ${ARCH}/ubuntu:20.04
ARG GOLANG_VERSION
ARG ARCH="amd64"
ARG GOARCH="amd64"
ENV DEBIAN_FRONTEND noninteractive
-RUN apt-get update && apt-get install -y build-essential git libboost-all-dev wget sqlite3 autoconf jq bsdmainutils shellcheck awscli
+RUN apt-get update && apt-get install -y build-essential git wget autoconf jq bsdmainutils shellcheck awscli libtool
WORKDIR /root
RUN wget https://dl.google.com/go/go${GOLANG_VERSION}.linux-${GOARCH}.tar.gz \
&& tar -xvf go${GOLANG_VERSION}.linux-${GOARCH}.tar.gz && \
diff --git a/docker/build/docker.ubuntu.Dockerfile b/docker/build/docker.ubuntu.Dockerfile
index 9b94659ba..5091afefa 100644
--- a/docker/build/docker.ubuntu.Dockerfile
+++ b/docker/build/docker.ubuntu.Dockerfile
@@ -1,6 +1,6 @@
ARG ARCH="amd64"
-FROM ${ARCH}/ubuntu:18.04
+FROM ${ARCH}/ubuntu:20.04
ARG GOLANG_VERSION
ARG ARCH="amd64"
RUN apt-get update && apt-get install curl python python3.7 python3-pip build-essential apt-transport-https ca-certificates software-properties-common -y && \
@@ -14,7 +14,7 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python2.7 1 &&
update-alternatives --set python /usr/bin/python3.7 && \
pip3 install mulecli
-RUN apt-get update && apt-get install -y autoconf bsdmainutils git libboost-all-dev && \
+RUN apt-get update && apt-get install -y autoconf bsdmainutils git && \
curl https://dl.google.com/go/go${GOLANG_VERSION}.linux-amd64.tar.gz | tar -xzf - && \
mv go /usr/local
diff --git a/docker/build/releases-page.Dockerfile b/docker/build/releases-page.Dockerfile
index 80814f7fb..c96f332d5 100644
--- a/docker/build/releases-page.Dockerfile
+++ b/docker/build/releases-page.Dockerfile
@@ -1,4 +1,4 @@
-FROM ubuntu:18.04
+FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install git python3 python3-pip -y && \
diff --git a/docker/files/run/run.sh b/docker/files/run/run.sh
index f6b12798f..eef16959f 100755
--- a/docker/files/run/run.sh
+++ b/docker/files/run/run.sh
@@ -2,6 +2,11 @@
set -e
+if [ "$ALGORAND_DATA" != "/algod/data" ]; then
+ echo "Do not override 'ALGORAND_DATA' environment variable."
+ exit 1
+fi
+
if [ "$DEBUG" = "1" ]; then
set -x
fi
@@ -12,7 +17,7 @@ fi
# as the algorand user.
if [ "$(id -u)" = '0' ]; then
chown -R algorand:algorand $ALGORAND_DATA
- exec runuser -u algorand "$BASH_SOURCE"
+ exec gosu algorand "$0" "$@"
fi
# Script to configure or resume a network. Based on environment settings the
@@ -71,13 +76,19 @@ function configure_data_dir() {
# initialize config with profile.
if [ "$PROFILE" != "" ]; then
- algocfg profile set --yes -d "$ALGORAND_DATA" "$PROFILE"
+ algocfg profile set --yes -d "$ALGORAND_DATA" "$PROFILE"
+ fi
+
+ # set profile overrides
+ if [ "$GOSSIP_PORT" != "" ]; then
+ algocfg -d . set -p NetAddress -v "0.0.0.0:${GOSSIP_PORT}"
+ algocfg -d . set -p DisableNetworking -v "false"
+ algocfg -d . set -p IncomingConnectionsLimit -v "1000"
fi
- # call after copying config.json to make sure the port is exposed.
algocfg -d . set -p EndpointAddress -v "0.0.0.0:${ALGOD_PORT}"
- # check for token overrides
+ # set token overrides
for dir in ${ALGORAND_DATA}/../*/; do
if [ "$TOKEN" != "" ]; then
echo "$TOKEN" > "$dir/algod.token"
@@ -121,41 +132,48 @@ function start_kmd() {
}
function start_new_public_network() {
- cd /algod
- if [ ! -d "/node/run/genesis/${NETWORK}" ]; then
- echo "No genesis file for '$NETWORK' is available."
- exit 1
- fi
-
mkdir -p "$ALGORAND_DATA"
-
cd "$ALGORAND_DATA"
- cp "/node/run/genesis/${NETWORK}/genesis.json" genesis.json
- cp /node/run/config.json.example config.json
+ # initialize genesis.json
+ if [ "$GENESIS_ADDRESS" != "" ]; then
+ # download genesis file from peer
+ echo "Attempting to download genesis file from $GENESIS_ADDRESS"
+ curl "$GENESIS_ADDRESS/genesis" -o genesis.json
+ elif [ -d "/node/run/genesis/${NETWORK}" ]; then
+ echo "Installing genesis file for ${NETWORK}"
+ cp "/node/run/genesis/${NETWORK}/genesis.json" genesis.json
+ else
+ echo "No genesis file for '$NETWORK' is available."
+ exit 1
+ fi
configure_data_dir
- local ID
- case $NETWORK in
- mainnet) ID="<network>.algorand.network" ;;
- testnet) ID="<network>.algorand.network" ;;
- betanet) ID="<network>.algodev.network" ;;
- alphanet) ID="<network>.algodev.network" ;;
- devnet) ID="<network>.algodev.network" ;;
- *)
- echo "Unknown network"
- exit 1
- ;;
- esac
- set -p DNSBootstrapID -v "$ID"
+ # if the peer address is set, it will be used instead of the DNS bootstrap ID
+ if [ "$PEER_ADDRESS" != "" ]; then
+ local ID
+ case $NETWORK in
+ mainnet) ID="<network>.algorand.network" ;;
+ testnet) ID="<network>.algorand.network" ;;
+ betanet) ID="<network>.algodev.network" ;;
+ alphanet) ID="<network>.algodev.network" ;;
+ devnet) ID="<network>.algodev.network" ;;
+ *)
+ echo "Unknown network."
+ exit 1
+ ;;
+ esac
+
+ set -p DNSBootstrapID -v "$ID"
+ fi
start_public_network
}
function start_private_network() {
configure_data_dir
- start_kmd
+ start_kmd &
# TODO: Is there a way to properly exec a private network?
goal network start -r "${ALGORAND_DATA}/.."
@@ -182,19 +200,21 @@ function start_new_private_network() {
##############
echo "Starting Algod Docker Container"
-echo " ALGORAND_DATA: $ALGORAND_DATA"
-echo " NETWORK: $NETWORK"
-echo " PROFILE: $PROFILE"
-echo " DEV_MODE: $DEV_MODE"
-echo " START_KMD: ${START_KMD:-"Not Set"}"
-echo " FAST_CATCHUP: $FAST_CATCHUP"
-echo " TOKEN: ${TOKEN:-"Not Set"}"
-echo " ADMIN_TOKEN: ${ADMIN_TOKEN:-"Not Set"}"
-echo " KMD_TOKEN: ${KMD_TOKEN:-"Not Set"}"
-echo " TELEMETRY_NAME: $TELEMETRY_NAME"
-echo " NUM_ROUNDS: $NUM_ROUNDS"
-echo " PEER_ADDRESS: $PEER_ADDRESS"
-echo " ALGOD_PORT: $ALGOD_PORT"
+echo " ALGORAND_DATA: $ALGORAND_DATA"
+echo " NETWORK: $NETWORK"
+echo " PROFILE: $PROFILE"
+echo " DEV_MODE: $DEV_MODE"
+echo " START_KMD: ${START_KMD:-"Not Set"}"
+echo " FAST_CATCHUP: $FAST_CATCHUP"
+echo " TOKEN: ${TOKEN:-"Not Set"}"
+echo " ADMIN_TOKEN: ${ADMIN_TOKEN:-"Not Set"}"
+echo " KMD_TOKEN: ${KMD_TOKEN:-"Not Set"}"
+echo " TELEMETRY_NAME: $TELEMETRY_NAME"
+echo " NUM_ROUNDS: $NUM_ROUNDS"
+echo " GENESIS_ADDRESS: $GENESIS_ADDRESS"
+echo " PEER_ADDRESS: $PEER_ADDRESS"
+echo " GOSSIP_PORT: $GOSSIP_PORT"
+echo " ALGOD_PORT: $ALGOD_PORT"
# If data directory is initialized, start existing environment.
if [ -f "$ALGORAND_DATA/../network.json" ]; then
@@ -206,7 +226,7 @@ elif [ -f "$ALGORAND_DATA/genesis.json" ]; then
fi
# Initialize and start network.
-if [ "$NETWORK" == "" ]; then
+if [ "$NETWORK" == "" ] && [ "$PEER_ADDRESS" == "" ]; then
start_new_private_network
else
start_new_public_network
diff --git a/docs/follower_node.md b/docs/follower_node.md
index 9c58a7a82..8df230641 100644
--- a/docs/follower_node.md
+++ b/docs/follower_node.md
@@ -25,6 +25,7 @@ Follower mode was initially created to be a data source for [Conduit](https://gi
Behavior is controlled with the `config.json` file:
| property | description |
+| -------- | ----------- |
| EnableFollowMode | When set to `true` the node starts as a network follower. |
| MaxAcctLookback | The number of additional `Ledger State Delta` objects available. The default can be used, increasing to 64 or higher could help performance. |
| CatchupParallelBlocks | The number of blocks that are fetched concurrently. The default can be used, increasing to 64 or higher could help performance. |
diff --git a/gen/generate.go b/gen/generate.go
index ea8792118..5fb1c0126 100644
--- a/gen/generate.go
+++ b/gen/generate.go
@@ -169,7 +169,7 @@ func generateGenesisFiles(protoVersion protocol.ConsensusVersion, protoParams co
comment = genData.Comment
genesisAddrs = make(map[string]basics.Address)
- records = make(map[string]basics.AccountData)
+ records = make(map[string]bookkeeping.GenesisAccountData)
)
if partKeyDilution == 0 {
@@ -275,7 +275,7 @@ func generateGenesisFiles(protoVersion protocol.ConsensusVersion, protoParams co
}
}
- var data basics.AccountData
+ var data bookkeeping.GenesisAccountData
data.Status = wallet.Online
data.MicroAlgos.Raw = wallet.Stake
if wallet.Online == basics.Online {
@@ -345,12 +345,12 @@ func generateGenesisFiles(protoVersion protocol.ConsensusVersion, protoParams co
rewardsBalance = protoParams.MinBalance
}
- records["FeeSink"] = basics.AccountData{
+ records["FeeSink"] = bookkeeping.GenesisAccountData{
Status: basics.NotParticipating,
MicroAlgos: basics.MicroAlgos{Raw: protoParams.MinBalance},
}
- records["RewardsPool"] = basics.AccountData{
+ records["RewardsPool"] = bookkeeping.GenesisAccountData{
Status: basics.NotParticipating,
MicroAlgos: basics.MicroAlgos{Raw: rewardsBalance},
}
diff --git a/go.mod b/go.mod
index f490570eb..a69db49ce 100644
--- a/go.mod
+++ b/go.mod
@@ -1,23 +1,24 @@
module github.com/algorand/go-algorand
-go 1.17
+go 1.20
require (
github.com/DataDog/zstd v1.5.2
github.com/algorand/avm-abi v0.2.0
- github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414
- github.com/algorand/go-codec/codec v1.1.9
+ github.com/algorand/falcon v0.1.0
+ github.com/algorand/go-codec/codec v1.1.10
github.com/algorand/go-deadlock v0.2.2
github.com/algorand/go-sumhash v0.1.0
github.com/algorand/graphtrace v0.1.0
- github.com/algorand/msgp v1.1.53
+ github.com/algorand/msgp v1.1.55
github.com/algorand/oapi-codegen v1.12.0-algorand.0
+ github.com/algorand/sortition v1.0.0
github.com/algorand/websocket v1.4.6
github.com/aws/aws-sdk-go v1.33.0
github.com/consensys/gnark-crypto v0.7.0
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018
github.com/dchest/siphash v1.2.1
- github.com/fatih/color v1.7.0
+ github.com/fatih/color v1.13.0
github.com/getkin/kin-openapi v0.107.0
github.com/gofrs/flock v0.7.0
github.com/golang/snappy v0.0.4
@@ -27,20 +28,22 @@ require (
github.com/karalabe/usb v0.0.2
github.com/labstack/echo/v4 v4.9.1
github.com/mattn/go-sqlite3 v1.10.0
- github.com/miekg/dns v1.1.27
+ github.com/miekg/dns v1.1.41
github.com/olivere/elastic v6.2.14+incompatible
github.com/sirupsen/logrus v1.8.1
- github.com/spf13/cobra v0.0.3
+ github.com/spf13/cobra v1.3.0
github.com/stretchr/testify v1.8.1
golang.org/x/crypto v0.1.0
+ golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
golang.org/x/sys v0.7.0
golang.org/x/text v0.9.0
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009
+ pgregory.net/rapid v0.6.2
)
require (
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
- github.com/cpuguy83/go-md2man v1.0.8 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fortytw2/leaktest v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
@@ -53,7 +56,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/labstack/gommon v0.4.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
- github.com/mattn/go-colorable v0.1.11 // indirect
+ github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
@@ -61,12 +64,13 @@ require (
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/russross/blackfriday v1.5.2 // indirect
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.1 // indirect
golang.org/x/net v0.9.0 // indirect
+ golang.org/x/sync v0.1.0 // indirect
golang.org/x/term v0.7.0 // indirect
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
diff --git a/go.sum b/go.sum
index 972819621..4e2d9d22c 100644
--- a/go.sum
+++ b/go.sum
@@ -1,34 +1,125 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/algorand/avm-abi v0.2.0 h1:bkjsG+BOEcxUcnGSALLosmltE0JZdg+ZisXKx0UDX2k=
github.com/algorand/avm-abi v0.2.0/go.mod h1:+CgwM46dithy850bpTeHh9MC99zpn2Snirb3QTl2O/g=
-github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414 h1:nwYN+GQ7Z5OOfZwqBO1ma7DSlP7S1YrKWICOyjkwqrc=
-github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
-github.com/algorand/go-codec/codec v1.1.9 h1:el4HFSPZhP+YCgOZxeFGB/BqlNkaUIs55xcALulUTCM=
-github.com/algorand/go-codec/codec v1.1.9/go.mod h1:YkEx5nmr/zuCeaDYOIhlDg92Lxju8tj2d2NrYqP7g7k=
+github.com/algorand/falcon v0.1.0 h1:xl832kfZ7hHG6B4p90DQynjfKFGbIUgUOnsRiMZXfAo=
+github.com/algorand/falcon v0.1.0/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
+github.com/algorand/go-codec/codec v1.1.10 h1:zmWYU1cp64jQVTOG8Tw8wa+k0VfwgXIPbnDfiVa+5QA=
+github.com/algorand/go-codec/codec v1.1.10/go.mod h1:YkEx5nmr/zuCeaDYOIhlDg92Lxju8tj2d2NrYqP7g7k=
github.com/algorand/go-deadlock v0.2.2 h1:L7AKATSUCzoeVuOgpTipfCEjdUu5ECmlje8R7lP9DOY=
github.com/algorand/go-deadlock v0.2.2/go.mod h1:Hat1OXKqKNUcN/iv74FjGhF4hsOE2l7gOgQ9ZVIq6Fk=
github.com/algorand/go-sumhash v0.1.0 h1:b/QRhyLuF//vOcicBIxBXYW8bERNoeLxieht/dUYpVg=
github.com/algorand/go-sumhash v0.1.0/go.mod h1:OOe7jdDWUhLkuP1XytkK5gnLu9entAviN5DfDZh6XAc=
github.com/algorand/graphtrace v0.1.0 h1:QemP1iT0W56SExD0NfiU6rsG34/v0Je6bg5UZnptEUM=
github.com/algorand/graphtrace v0.1.0/go.mod h1:HscLQrzBdH1BH+5oehs3ICd8SYcXvnSL9BjfTu8WHCc=
-github.com/algorand/msgp v1.1.53 h1:D6HKLyvLE6ltfsf8Apsrc+kqYb/CcOZEAfh1DpkPrNg=
-github.com/algorand/msgp v1.1.53/go.mod h1:5K3d58/poT5fPmtiwuQft6GjgSrVEM46KoXdLrID8ZU=
+github.com/algorand/msgp v1.1.55 h1:kWc9Xc08xtxCTWUiq1cRW5XGF+DFcfSGihYf0IZ/ivs=
+github.com/algorand/msgp v1.1.55/go.mod h1:RqZQBzAFDWpwh5TlabzZkWy+6kwL9cvXfLbU0gD99EA=
github.com/algorand/oapi-codegen v1.12.0-algorand.0 h1:W9PvED+wAJc+9EeXPONnA+0zE9UhynEqoDs4OgAxKhk=
github.com/algorand/oapi-codegen v1.12.0-algorand.0/go.mod h1:tIWJ9K/qrLDVDt5A1p82UmxZIEGxv2X+uoujdhEAL48=
+github.com/algorand/sortition v1.0.0 h1:PJiZtdSTBm4nArQrZXBnhlljHXhuyAXRJBqVWowQu3E=
+github.com/algorand/sortition v1.0.0/go.mod h1:23CZwAbTWPv0bBsq+Php/2J6Y/iXDyzlfcZyepeY5Fo=
github.com/algorand/websocket v1.4.6 h1:I0kV4EYwatuUrKtNiwzYYgojgwh6pksDmlqntKG2Woc=
github.com/algorand/websocket v1.4.6/go.mod h1:HJmdGzFtnlUQ4nTzZP6WrT29oGYf1t6Ybi64vROcT+M=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.33.0 h1:Bq5Y6VTLbfnJp1IV8EL/qUU5qO1DYHda/zis/sqevkY=
github.com/aws/aws-sdk-go v1.33.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/consensys/gnark-crypto v0.7.0 h1:rwdy8+ssmLYRqKp+ryRRgQJl/rCq2uv+n83cOydm5UE=
github.com/consensys/gnark-crypto v0.7.0/go.mod h1:KPSuJzyxkJA8xZ/+CV47tyqkr9MmpZA3PXivK4VPrVg=
-github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
-github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -36,12 +127,34 @@ github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
github.com/dchest/siphash v1.2.1 h1:4cLinnzVJDKxTCl9B01807Yiy+W7ZzVHj/KIroQRvT4=
github.com/dchest/siphash v1.2.1/go.mod h1:q+IRvb2gOSrUnYoPqHiyHXS0FOBBOdl6tONBlVnOnt4=
-github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/getkin/kin-openapi v0.107.0 h1:bxhL6QArW7BXQj8NjXfIJQy680NsMKd25nwhvpCXchg=
github.com/getkin/kin-openapi v0.107.0/go.mod h1:9Dhr+FasATJZjS4iOLvB0hkaxgYdulrNYm2e9epLWOo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
@@ -49,19 +162,128 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.7.0 h1:pGFUjl501gafK9HBt1VGL1KCOd/YhIooID+xgyJCf3g=
github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/invopop/yaml v0.1.0 h1:YW3WGUoJEXYfzWBjn00zIlrw7brGVD0fUKRYDPAPhrc=
@@ -72,10 +294,23 @@ github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -86,116 +321,576 @@ github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
-github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/olivere/elastic v6.2.14+incompatible h1:k+KadwNP/dkXE0/eu+T6otk1+5fe0tEpPyQJ4XVm5i8=
github.com/olivere/elastic v6.2.14+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0=
+github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009 h1:q/fZgS8MMadqFFGa8WL4Oyz+TmjiZfi8UrzWhTl8d5w=
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009/go.mod h1:O0bY1e/dSoxMYZYTHP0SWKxG5EWLEvKR9/cOjWPPMKU=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -203,3 +898,15 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+pgregory.net/rapid v0.6.2 h1:ErW5sL+UKtfBfUTsWHDCoeB+eZKLKMxrSd1VJY6W4bw=
+pgregory.net/rapid v0.6.2/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/installer/config.json.example b/installer/config.json.example
index 4ae34a571..7b6ceb532 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 27,
+ "Version": 28,
"AccountUpdatesStatsInterval": 5000000000,
"AccountsRebuildSynchronousMode": 1,
"AgreementIncomingBundlesQueueLength": 15,
@@ -9,6 +9,7 @@
"Archival": false,
"BaseLoggerDebugLevel": 4,
"BlockServiceCustomFallbackEndpoints": "",
+ "BlockServiceMemCap": 500000000,
"BroadcastConnectionsLimit": -1,
"CadaverDirectory": "",
"CadaverSizeTarget": 0,
@@ -24,7 +25,7 @@
"CatchupParallelBlocks": 16,
"ConnectionsRateLimitingCount": 60,
"ConnectionsRateLimitingWindowSeconds": 1,
- "DNSBootstrapID": "<network>.algorand.network",
+ "DNSBootstrapID": "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net)",
"DNSSecurityFlags": 1,
"DeadlockDetection": 0,
"DeadlockDetectionThreshold": 30,
@@ -66,7 +67,6 @@
"IncomingConnectionsLimit": 2400,
"IncomingMessageFilterBucketCount": 5,
"IncomingMessageFilterBucketSize": 512,
- "IsIndexerActive": false,
"LedgerSynchronousMode": 2,
"LogArchiveMaxAge": "",
"LogArchiveName": "node.archive.log",
@@ -74,7 +74,7 @@
"MaxAPIBoxPerApplication": 100000,
"MaxAPIResourcesPerAccount": 100000,
"MaxAcctLookback": 4,
- "MaxCatchpointDownloadDuration": 7200000000000,
+ "MaxCatchpointDownloadDuration": 43200000000000,
"MaxConnectionsPerIP": 15,
"MinCatchpointFileDownloadBytesPerSecond": 20480,
"NetAddress": "",
@@ -98,6 +98,7 @@
"RestReadTimeoutSeconds": 15,
"RestWriteTimeoutSeconds": 120,
"RunHosted": false,
+ "StorageEngine": "sqlite",
"SuggestedFeeBlockHistory": 3,
"SuggestedFeeSlidingWindowSize": 50,
"TLSCertFile": "",
@@ -108,6 +109,7 @@
"TxBacklogReservedCapacityPerPeer": 20,
"TxBacklogServiceRateWindowSeconds": 10,
"TxBacklogSize": 26000,
+ "TxIncomingFilterMaxSize": 500000,
"TxIncomingFilteringFlags": 1,
"TxPoolExponentialIncreaseFactor": 2,
"TxPoolSize": 75000,
diff --git a/installer/debian/algorand-devtools/conffiles b/installer/debian/algorand-devtools/conffiles
index 382fb0fdf..09b275f2f 100644
--- a/installer/debian/algorand-devtools/conffiles
+++ b/installer/debian/algorand-devtools/conffiles
@@ -1,2 +1 @@
/etc/apt/apt.conf.d/53algorand-devtools-upgrades
-
diff --git a/internal/rapidgen/rapidgenerators.go b/internal/rapidgen/rapidgenerators.go
new file mode 100644
index 000000000..37b9b6988
--- /dev/null
+++ b/internal/rapidgen/rapidgenerators.go
@@ -0,0 +1,99 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package rapidgen
+
+// See https://github.com/flyingmutant/rapid/pull/18
+
+import (
+ "fmt"
+ "pgregory.net/rapid"
+ "strings"
+)
+
+// DomainWithPort generates an RFC 1035 compliant domain name with a port.
+func DomainWithPort() *rapid.Generator[string] {
+ return rapid.Custom(func(t *rapid.T) string {
+ return fmt.Sprintf("%s:%d", Domain().Draw(t, "domain"), rapid.IntRange(1, 65535).Draw(t, "port"))
+ })
+}
+
+// Domain generates an RFC 1035 compliant domain name.
+func Domain() *rapid.Generator[string] {
+ return DomainOf(255, 63, "", nil)
+}
+
+// DomainWithSuffixAndPort generates an RFC 1035 compliant domain name with the specified domain suffix (assumes compliant), taking a list of domains to not match.
+func DomainWithSuffixAndPort(suffix string, dontMatch []string) *rapid.Generator[string] {
+ return rapid.Custom(func(t *rapid.T) string {
+ return fmt.Sprintf("%s:%d", DomainOf(253, 63, suffix, dontMatch).Draw(t, "domain"),
+ rapid.IntRange(1, 65535).Draw(t, "port"))
+ })
+}
+
+// DomainOf generates an RFC 1035 compliant domain name,
+// with a maximum overall length of maxLength
+// a maximum number of elements of maxElements
+// and the specified domain suffix (assumes compliant).
+func DomainOf(maxLength, maxElementLength int, domainSuffix string, dontMatch []string) *rapid.Generator[string] {
+ assertf(4 <= maxLength, "maximum length (%v) should not be less than 4, to generate a two character domain and a one character subdomain", maxLength)
+ assertf(maxLength <= 255, "maximum length (%v) should not be greater than 255 to comply with RFC 1035", maxLength)
+ assertf(1 <= maxElementLength, "maximum element length (%v) should not be less than 1 to comply with RFC 1035", maxElementLength)
+ assertf(maxElementLength <= 63, "maximum element length (%v) should not be greater than 63 to comply with RFC 1035", maxElementLength)
+
+ genDomain := func() *rapid.Generator[string] {
+ return rapid.Custom(func(t *rapid.T) string {
+ var domain string
+ if domainSuffix != "" {
+ domain = domainSuffix
+ } else {
+ domain = tldGenerator.
+ Filter(func(s string) bool { return len(s)+2 <= maxLength }).
+ Draw(t, "domain")
+ }
+
+ expr := fmt.Sprintf(`[a-zA-Z]([a-zA-Z0-9\-]{0,%d}[a-zA-Z0-9])?`, maxElementLength-2)
+
+ el := rapid.IntRange(1, 126).Draw(t, "numElements")
+ for i := 0; i < el; i++ {
+ subDomain := rapid.StringMatching(expr).Draw(t, "subdomain")
+ if len(domain)+len(subDomain) >= maxLength {
+ break
+ }
+ domain = subDomain + "." + domain
+ }
+
+ return domain
+ })
+ }
+
+ return genDomain().Filter(func(domain string) bool {
+ for _, v := range dontMatch {
+ if strings.EqualFold(v, domain) {
+ return false
+ }
+ }
+ return true
+ })
+}
+
+var tldGenerator = rapid.SampledFrom(tlds)
+
+func assertf(ok bool, format string, args ...interface{}) {
+ if !ok {
+ panic(fmt.Sprintf(format, args...))
+ }
+}
diff --git a/internal/rapidgen/tld.go b/internal/rapidgen/tld.go
new file mode 100644
index 000000000..e6e3e8963
--- /dev/null
+++ b/internal/rapidgen/tld.go
@@ -0,0 +1,1508 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package rapidgen
+
+import "strings"
+
+// sourced from https://data.iana.org/TLD/tlds-alpha-by-domain.txt
+//
+//nolint:misspell // this is a list of TLDs from IANA - we do not have control over spelling
+const tldsByAlpha = `
+AAA
+AARP
+ABARTH
+ABB
+ABBOTT
+ABBVIE
+ABC
+ABLE
+ABOGADO
+ABUDHABI
+AC
+ACADEMY
+ACCENTURE
+ACCOUNTANT
+ACCOUNTANTS
+ACO
+ACTOR
+AD
+ADS
+ADULT
+AE
+AEG
+AERO
+AETNA
+AF
+AFL
+AFRICA
+AG
+AGAKHAN
+AGENCY
+AI
+AIG
+AIRBUS
+AIRFORCE
+AIRTEL
+AKDN
+AL
+ALFAROMEO
+ALIBABA
+ALIPAY
+ALLFINANZ
+ALLSTATE
+ALLY
+ALSACE
+ALSTOM
+AM
+AMAZON
+AMERICANEXPRESS
+AMERICANFAMILY
+AMEX
+AMFAM
+AMICA
+AMSTERDAM
+ANALYTICS
+ANDROID
+ANQUAN
+ANZ
+AO
+AOL
+APARTMENTS
+APP
+APPLE
+AQ
+AQUARELLE
+AR
+ARAB
+ARAMCO
+ARCHI
+ARMY
+ARPA
+ART
+ARTE
+AS
+ASDA
+ASIA
+ASSOCIATES
+AT
+ATHLETA
+ATTORNEY
+AU
+AUCTION
+AUDI
+AUDIBLE
+AUDIO
+AUSPOST
+AUTHOR
+AUTO
+AUTOS
+AVIANCA
+AW
+AWS
+AX
+AXA
+AZ
+AZURE
+BA
+BABY
+BAIDU
+BANAMEX
+BANANAREPUBLIC
+BAND
+BANK
+BAR
+BARCELONA
+BARCLAYCARD
+BARCLAYS
+BAREFOOT
+BARGAINS
+BASEBALL
+BASKETBALL
+BAUHAUS
+BAYERN
+BB
+BBC
+BBT
+BBVA
+BCG
+BCN
+BD
+BE
+BEATS
+BEAUTY
+BEER
+BENTLEY
+BERLIN
+BEST
+BESTBUY
+BET
+BF
+BG
+BH
+BHARTI
+BI
+BIBLE
+BID
+BIKE
+BING
+BINGO
+BIO
+BIZ
+BJ
+BLACK
+BLACKFRIDAY
+BLOCKBUSTER
+BLOG
+BLOOMBERG
+BLUE
+BM
+BMS
+BMW
+BN
+BNPPARIBAS
+BO
+BOATS
+BOEHRINGER
+BOFA
+BOM
+BOND
+BOO
+BOOK
+BOOKING
+BOSCH
+BOSTIK
+BOSTON
+BOT
+BOUTIQUE
+BOX
+BR
+BRADESCO
+BRIDGESTONE
+BROADWAY
+BROKER
+BROTHER
+BRUSSELS
+BS
+BT
+BUILD
+BUILDERS
+BUSINESS
+BUY
+BUZZ
+BV
+BW
+BY
+BZ
+BZH
+CA
+CAB
+CAFE
+CAL
+CALL
+CALVINKLEIN
+CAM
+CAMERA
+CAMP
+CANON
+CAPETOWN
+CAPITAL
+CAPITALONE
+CAR
+CARAVAN
+CARDS
+CARE
+CAREER
+CAREERS
+CARS
+CASA
+CASE
+CASH
+CASINO
+CAT
+CATERING
+CATHOLIC
+CBA
+CBN
+CBRE
+CBS
+CC
+CD
+CENTER
+CEO
+CERN
+CF
+CFA
+CFD
+CG
+CH
+CHANEL
+CHANNEL
+CHARITY
+CHASE
+CHAT
+CHEAP
+CHINTAI
+CHRISTMAS
+CHROME
+CHURCH
+CI
+CIPRIANI
+CIRCLE
+CISCO
+CITADEL
+CITI
+CITIC
+CITY
+CITYEATS
+CK
+CL
+CLAIMS
+CLEANING
+CLICK
+CLINIC
+CLINIQUE
+CLOTHING
+CLOUD
+CLUB
+CLUBMED
+CM
+CN
+CO
+COACH
+CODES
+COFFEE
+COLLEGE
+COLOGNE
+COM
+COMCAST
+COMMBANK
+COMMUNITY
+COMPANY
+COMPARE
+COMPUTER
+COMSEC
+CONDOS
+CONSTRUCTION
+CONSULTING
+CONTACT
+CONTRACTORS
+COOKING
+COOKINGCHANNEL
+COOL
+COOP
+CORSICA
+COUNTRY
+COUPON
+COUPONS
+COURSES
+CPA
+CR
+CREDIT
+CREDITCARD
+CREDITUNION
+CRICKET
+CROWN
+CRS
+CRUISE
+CRUISES
+CU
+CUISINELLA
+CV
+CW
+CX
+CY
+CYMRU
+CYOU
+CZ
+DABUR
+DAD
+DANCE
+DATA
+DATE
+DATING
+DATSUN
+DAY
+DCLK
+DDS
+DE
+DEAL
+DEALER
+DEALS
+DEGREE
+DELIVERY
+DELL
+DELOITTE
+DELTA
+DEMOCRAT
+DENTAL
+DENTIST
+DESI
+DESIGN
+DEV
+DHL
+DIAMONDS
+DIET
+DIGITAL
+DIRECT
+DIRECTORY
+DISCOUNT
+DISCOVER
+DISH
+DIY
+DJ
+DK
+DM
+DNP
+DO
+DOCS
+DOCTOR
+DOG
+DOMAINS
+DOT
+DOWNLOAD
+DRIVE
+DTV
+DUBAI
+DUNLOP
+DUPONT
+DURBAN
+DVAG
+DVR
+DZ
+EARTH
+EAT
+EC
+ECO
+EDEKA
+EDU
+EDUCATION
+EE
+EG
+EMAIL
+EMERCK
+ENERGY
+ENGINEER
+ENGINEERING
+ENTERPRISES
+EPSON
+EQUIPMENT
+ER
+ERICSSON
+ERNI
+ES
+ESQ
+ESTATE
+ET
+ETISALAT
+EU
+EUROVISION
+EUS
+EVENTS
+EXCHANGE
+EXPERT
+EXPOSED
+EXPRESS
+EXTRASPACE
+FAGE
+FAIL
+FAIRWINDS
+FAITH
+FAMILY
+FAN
+FANS
+FARM
+FARMERS
+FASHION
+FAST
+FEDEX
+FEEDBACK
+FERRARI
+FERRERO
+FI
+FIAT
+FIDELITY
+FIDO
+FILM
+FINAL
+FINANCE
+FINANCIAL
+FIRE
+FIRESTONE
+FIRMDALE
+FISH
+FISHING
+FIT
+FITNESS
+FJ
+FK
+FLICKR
+FLIGHTS
+FLIR
+FLORIST
+FLOWERS
+FLY
+FM
+FO
+FOO
+FOOD
+FOODNETWORK
+FOOTBALL
+FORD
+FOREX
+FORSALE
+FORUM
+FOUNDATION
+FOX
+FR
+FREE
+FRESENIUS
+FRL
+FROGANS
+FRONTDOOR
+FRONTIER
+FTR
+FUJITSU
+FUN
+FUND
+FURNITURE
+FUTBOL
+FYI
+GA
+GAL
+GALLERY
+GALLO
+GALLUP
+GAME
+GAMES
+GAP
+GARDEN
+GAY
+GB
+GBIZ
+GD
+GDN
+GE
+GEA
+GENT
+GENTING
+GEORGE
+GF
+GG
+GGEE
+GH
+GI
+GIFT
+GIFTS
+GIVES
+GIVING
+GL
+GLASS
+GLE
+GLOBAL
+GLOBO
+GM
+GMAIL
+GMBH
+GMO
+GMX
+GN
+GODADDY
+GOLD
+GOLDPOINT
+GOLF
+GOO
+GOODYEAR
+GOOG
+GOOGLE
+GOP
+GOT
+GOV
+GP
+GQ
+GR
+GRAINGER
+GRAPHICS
+GRATIS
+GREEN
+GRIPE
+GROCERY
+GROUP
+GS
+GT
+GU
+GUARDIAN
+GUCCI
+GUGE
+GUIDE
+GUITARS
+GURU
+GW
+GY
+HAIR
+HAMBURG
+HANGOUT
+HAUS
+HBO
+HDFC
+HDFCBANK
+HEALTH
+HEALTHCARE
+HELP
+HELSINKI
+HERE
+HERMES
+HGTV
+HIPHOP
+HISAMITSU
+HITACHI
+HIV
+HK
+HKT
+HM
+HN
+HOCKEY
+HOLDINGS
+HOLIDAY
+HOMEDEPOT
+HOMEGOODS
+HOMES
+HOMESENSE
+HONDA
+HORSE
+HOSPITAL
+HOST
+HOSTING
+HOT
+HOTELES
+HOTELS
+HOTMAIL
+HOUSE
+HOW
+HR
+HSBC
+HT
+HU
+HUGHES
+HYATT
+HYUNDAI
+IBM
+ICBC
+ICE
+ICU
+ID
+IE
+IEEE
+IFM
+IKANO
+IL
+IM
+IMAMAT
+IMDB
+IMMO
+IMMOBILIEN
+IN
+INC
+INDUSTRIES
+INFINITI
+INFO
+ING
+INK
+INSTITUTE
+INSURANCE
+INSURE
+INT
+INTERNATIONAL
+INTUIT
+INVESTMENTS
+IO
+IPIRANGA
+IQ
+IR
+IRISH
+IS
+ISMAILI
+IST
+ISTANBUL
+IT
+ITAU
+ITV
+JAGUAR
+JAVA
+JCB
+JE
+JEEP
+JETZT
+JEWELRY
+JIO
+JLL
+JM
+JMP
+JNJ
+JO
+JOBS
+JOBURG
+JOT
+JOY
+JP
+JPMORGAN
+JPRS
+JUEGOS
+JUNIPER
+KAUFEN
+KDDI
+KE
+KERRYHOTELS
+KERRYLOGISTICS
+KERRYPROPERTIES
+KFH
+KG
+KH
+KI
+KIA
+KIDS
+KIM
+KINDER
+KINDLE
+KITCHEN
+KIWI
+KM
+KN
+KOELN
+KOMATSU
+KOSHER
+KP
+KPMG
+KPN
+KR
+KRD
+KRED
+KUOKGROUP
+KW
+KY
+KYOTO
+KZ
+LA
+LACAIXA
+LAMBORGHINI
+LAMER
+LANCASTER
+LANCIA
+LAND
+LANDROVER
+LANXESS
+LASALLE
+LAT
+LATINO
+LATROBE
+LAW
+LAWYER
+LB
+LC
+LDS
+LEASE
+LECLERC
+LEFRAK
+LEGAL
+LEGO
+LEXUS
+LGBT
+LI
+LIDL
+LIFE
+LIFEINSURANCE
+LIFESTYLE
+LIGHTING
+LIKE
+LILLY
+LIMITED
+LIMO
+LINCOLN
+LINDE
+LINK
+LIPSY
+LIVE
+LIVING
+LK
+LLC
+LLP
+LOAN
+LOANS
+LOCKER
+LOCUS
+LOL
+LONDON
+LOTTE
+LOTTO
+LOVE
+LPL
+LPLFINANCIAL
+LR
+LS
+LT
+LTD
+LTDA
+LU
+LUNDBECK
+LUXE
+LUXURY
+LV
+LY
+MA
+MACYS
+MADRID
+MAIF
+MAISON
+MAKEUP
+MAN
+MANAGEMENT
+MANGO
+MAP
+MARKET
+MARKETING
+MARKETS
+MARRIOTT
+MARSHALLS
+MASERATI
+MATTEL
+MBA
+MC
+MCKINSEY
+MD
+ME
+MED
+MEDIA
+MEET
+MELBOURNE
+MEME
+MEMORIAL
+MEN
+MENU
+MERCKMSD
+MG
+MH
+MIAMI
+MICROSOFT
+MIL
+MINI
+MINT
+MIT
+MITSUBISHI
+MK
+ML
+MLB
+MLS
+MM
+MMA
+MN
+MO
+MOBI
+MOBILE
+MODA
+MOE
+MOI
+MOM
+MONASH
+MONEY
+MONSTER
+MORMON
+MORTGAGE
+MOSCOW
+MOTO
+MOTORCYCLES
+MOV
+MOVIE
+MP
+MQ
+MR
+MS
+MSD
+MT
+MTN
+MTR
+MU
+MUSEUM
+MUSIC
+MUTUAL
+MV
+MW
+MX
+MY
+MZ
+NA
+NAB
+NAGOYA
+NAME
+NATURA
+NAVY
+NBA
+NC
+NE
+NEC
+NET
+NETBANK
+NETFLIX
+NETWORK
+NEUSTAR
+NEW
+NEWS
+NEXT
+NEXTDIRECT
+NEXUS
+NF
+NFL
+NG
+NGO
+NHK
+NI
+NICO
+NIKE
+NIKON
+NINJA
+NISSAN
+NISSAY
+NL
+NO
+NOKIA
+NORTHWESTERNMUTUAL
+NORTON
+NOW
+NOWRUZ
+NOWTV
+NP
+NR
+NRA
+NRW
+NTT
+NU
+NYC
+NZ
+OBI
+OBSERVER
+OFFICE
+OKINAWA
+OLAYAN
+OLAYANGROUP
+OLDNAVY
+OLLO
+OM
+OMEGA
+ONE
+ONG
+ONL
+ONLINE
+OOO
+OPEN
+ORACLE
+ORANGE
+ORG
+ORGANIC
+ORIGINS
+OSAKA
+OTSUKA
+OTT
+OVH
+PA
+PAGE
+PANASONIC
+PARIS
+PARS
+PARTNERS
+PARTS
+PARTY
+PASSAGENS
+PAY
+PCCW
+PE
+PET
+PF
+PFIZER
+PG
+PH
+PHARMACY
+PHD
+PHILIPS
+PHONE
+PHOTO
+PHOTOGRAPHY
+PHOTOS
+PHYSIO
+PICS
+PICTET
+PICTURES
+PID
+PIN
+PING
+PINK
+PIONEER
+PIZZA
+PK
+PL
+PLACE
+PLAY
+PLAYSTATION
+PLUMBING
+PLUS
+PM
+PN
+PNC
+POHL
+POKER
+POLITIE
+PORN
+POST
+PR
+PRAMERICA
+PRAXI
+PRESS
+PRIME
+PRO
+PROD
+PRODUCTIONS
+PROF
+PROGRESSIVE
+PROMO
+PROPERTIES
+PROPERTY
+PROTECTION
+PRU
+PRUDENTIAL
+PS
+PT
+PUB
+PW
+PWC
+PY
+QA
+QPON
+QUEBEC
+QUEST
+RACING
+RADIO
+RE
+READ
+REALESTATE
+REALTOR
+REALTY
+RECIPES
+RED
+REDSTONE
+REDUMBRELLA
+REHAB
+REISE
+REISEN
+REIT
+RELIANCE
+REN
+RENT
+RENTALS
+REPAIR
+REPORT
+REPUBLICAN
+REST
+RESTAURANT
+REVIEW
+REVIEWS
+REXROTH
+RICH
+RICHARDLI
+RICOH
+RIL
+RIO
+RIP
+RO
+ROCHER
+ROCKS
+RODEO
+ROGERS
+ROOM
+RS
+RSVP
+RU
+RUGBY
+RUHR
+RUN
+RW
+RWE
+RYUKYU
+SA
+SAARLAND
+SAFE
+SAFETY
+SAKURA
+SALE
+SALON
+SAMSCLUB
+SAMSUNG
+SANDVIK
+SANDVIKCOROMANT
+SANOFI
+SAP
+SARL
+SAS
+SAVE
+SAXO
+SB
+SBI
+SBS
+SC
+SCA
+SCB
+SCHAEFFLER
+SCHMIDT
+SCHOLARSHIPS
+SCHOOL
+SCHULE
+SCHWARZ
+SCIENCE
+SCOT
+SD
+SE
+SEARCH
+SEAT
+SECURE
+SECURITY
+SEEK
+SELECT
+SENER
+SERVICES
+SEVEN
+SEW
+SEX
+SEXY
+SFR
+SG
+SH
+SHANGRILA
+SHARP
+SHAW
+SHELL
+SHIA
+SHIKSHA
+SHOES
+SHOP
+SHOPPING
+SHOUJI
+SHOW
+SHOWTIME
+SI
+SILK
+SINA
+SINGLES
+SITE
+SJ
+SK
+SKI
+SKIN
+SKY
+SKYPE
+SL
+SLING
+SM
+SMART
+SMILE
+SN
+SNCF
+SO
+SOCCER
+SOCIAL
+SOFTBANK
+SOFTWARE
+SOHU
+SOLAR
+SOLUTIONS
+SONG
+SONY
+SOY
+SPA
+SPACE
+SPORT
+SPOT
+SR
+SRL
+SS
+ST
+STADA
+STAPLES
+STAR
+STATEBANK
+STATEFARM
+STC
+STCGROUP
+STOCKHOLM
+STORAGE
+STORE
+STREAM
+STUDIO
+STUDY
+STYLE
+SU
+SUCKS
+SUPPLIES
+SUPPLY
+SUPPORT
+SURF
+SURGERY
+SUZUKI
+SV
+SWATCH
+SWISS
+SX
+SY
+SYDNEY
+SYSTEMS
+SZ
+TAB
+TAIPEI
+TALK
+TAOBAO
+TARGET
+TATAMOTORS
+TATAR
+TATTOO
+TAX
+TAXI
+TC
+TCI
+TD
+TDK
+TEAM
+TECH
+TECHNOLOGY
+TEL
+TEMASEK
+TENNIS
+TEVA
+TF
+TG
+TH
+THD
+THEATER
+THEATRE
+TIAA
+TICKETS
+TIENDA
+TIFFANY
+TIPS
+TIRES
+TIROL
+TJ
+TJMAXX
+TJX
+TK
+TKMAXX
+TL
+TM
+TMALL
+TN
+TO
+TODAY
+TOKYO
+TOOLS
+TOP
+TORAY
+TOSHIBA
+TOTAL
+TOURS
+TOWN
+TOYOTA
+TOYS
+TR
+TRADE
+TRADING
+TRAINING
+TRAVEL
+TRAVELCHANNEL
+TRAVELERS
+TRAVELERSINSURANCE
+TRUST
+TRV
+TT
+TUBE
+TUI
+TUNES
+TUSHU
+TV
+TVS
+TW
+TZ
+UA
+UBANK
+UBS
+UG
+UK
+UNICOM
+UNIVERSITY
+UNO
+UOL
+UPS
+US
+UY
+UZ
+VA
+VACATIONS
+VANA
+VANGUARD
+VC
+VE
+VEGAS
+VENTURES
+VERISIGN
+VERSICHERUNG
+VET
+VG
+VI
+VIAJES
+VIDEO
+VIG
+VIKING
+VILLAS
+VIN
+VIP
+VIRGIN
+VISA
+VISION
+VIVA
+VIVO
+VLAANDEREN
+VN
+VODKA
+VOLKSWAGEN
+VOLVO
+VOTE
+VOTING
+VOTO
+VOYAGE
+VU
+VUELOS
+WALES
+WALMART
+WALTER
+WANG
+WANGGOU
+WATCH
+WATCHES
+WEATHER
+WEATHERCHANNEL
+WEBCAM
+WEBER
+WEBSITE
+WED
+WEDDING
+WEIBO
+WEIR
+WF
+WHOSWHO
+WIEN
+WIKI
+WILLIAMHILL
+WIN
+WINDOWS
+WINE
+WINNERS
+WME
+WOLTERSKLUWER
+WOODSIDE
+WORK
+WORKS
+WORLD
+WOW
+WS
+WTC
+WTF
+XBOX
+XEROX
+XFINITY
+XIHUAN
+XIN
+XN--11B4C3D
+XN--1CK2E1B
+XN--1QQW23A
+XN--2SCRJ9C
+XN--30RR7Y
+XN--3BST00M
+XN--3DS443G
+XN--3E0B707E
+XN--3HCRJ9C
+XN--3PXU8K
+XN--42C2D9A
+XN--45BR5CYL
+XN--45BRJ9C
+XN--45Q11C
+XN--4DBRK0CE
+XN--4GBRIM
+XN--54B7FTA0CC
+XN--55QW42G
+XN--55QX5D
+XN--5SU34J936BGSG
+XN--5TZM5G
+XN--6FRZ82G
+XN--6QQ986B3XL
+XN--80ADXHKS
+XN--80AO21A
+XN--80AQECDR1A
+XN--80ASEHDB
+XN--80ASWG
+XN--8Y0A063A
+XN--90A3AC
+XN--90AE
+XN--90AIS
+XN--9DBQ2A
+XN--9ET52U
+XN--9KRT00A
+XN--B4W605FERD
+XN--BCK1B9A5DRE4C
+XN--C1AVG
+XN--C2BR7G
+XN--CCK2B3B
+XN--CCKWCXETD
+XN--CG4BKI
+XN--CLCHC0EA0B2G2A9GCD
+XN--CZR694B
+XN--CZRS0T
+XN--CZRU2D
+XN--D1ACJ3B
+XN--D1ALF
+XN--E1A4C
+XN--ECKVDTC9D
+XN--EFVY88H
+XN--FCT429K
+XN--FHBEI
+XN--FIQ228C5HS
+XN--FIQ64B
+XN--FIQS8S
+XN--FIQZ9S
+XN--FJQ720A
+XN--FLW351E
+XN--FPCRJ9C3D
+XN--FZC2C9E2C
+XN--FZYS8D69UVGM
+XN--G2XX48C
+XN--GCKR3F0F
+XN--GECRJ9C
+XN--GK3AT1E
+XN--H2BREG3EVE
+XN--H2BRJ9C
+XN--H2BRJ9C8C
+XN--HXT814E
+XN--I1B6B1A6A2E
+XN--IMR513N
+XN--IO0A7I
+XN--J1AEF
+XN--J1AMH
+XN--J6W193G
+XN--JLQ480N2RG
+XN--JVR189M
+XN--KCRX77D1X4A
+XN--KPRW13D
+XN--KPRY57D
+XN--KPUT3I
+XN--L1ACC
+XN--LGBBAT1AD8J
+XN--MGB9AWBF
+XN--MGBA3A3EJT
+XN--MGBA3A4F16A
+XN--MGBA7C0BBN0A
+XN--MGBAAKC7DVF
+XN--MGBAAM7A8H
+XN--MGBAB2BD
+XN--MGBAH1A3HJKRD
+XN--MGBAI9AZGQP6J
+XN--MGBAYH7GPA
+XN--MGBBH1A
+XN--MGBBH1A71E
+XN--MGBC0A9AZCG
+XN--MGBCA7DZDO
+XN--MGBCPQ6GPA1A
+XN--MGBERP4A5D4AR
+XN--MGBGU82A
+XN--MGBI4ECEXP
+XN--MGBPL2FH
+XN--MGBT3DHD
+XN--MGBTX2B
+XN--MGBX4CD0AB
+XN--MIX891F
+XN--MK1BU44C
+XN--MXTQ1M
+XN--NGBC5AZD
+XN--NGBE9E0A
+XN--NGBRX
+XN--NODE
+XN--NQV7F
+XN--NQV7FS00EMA
+XN--NYQY26A
+XN--O3CW4H
+XN--OGBPF8FL
+XN--OTU796D
+XN--P1ACF
+XN--P1AI
+XN--PGBS0DH
+XN--PSSY2U
+XN--Q7CE6A
+XN--Q9JYB4C
+XN--QCKA1PMC
+XN--QXA6A
+XN--QXAM
+XN--RHQV96G
+XN--ROVU88B
+XN--RVC1E0AM3E
+XN--S9BRJ9C
+XN--SES554G
+XN--T60B56A
+XN--TCKWE
+XN--TIQ49XQYJ
+XN--UNUP4Y
+XN--VERMGENSBERATER-CTB
+XN--VERMGENSBERATUNG-PWB
+XN--VHQUV
+XN--VUQ861B
+XN--W4R85EL8FHU5DNRA
+XN--W4RS40L
+XN--WGBH1C
+XN--WGBL6A
+XN--XHQ521B
+XN--XKC2AL3HYE2A
+XN--XKC2DL3A5EE0H
+XN--Y9A3AQ
+XN--YFRO4I67O
+XN--YGBI2AMMX
+XN--ZFR164B
+XXX
+XYZ
+YACHTS
+YAHOO
+YAMAXUN
+YANDEX
+YE
+YODOBASHI
+YOGA
+YOKOHAMA
+YOU
+YOUTUBE
+YT
+YUN
+ZA
+ZAPPOS
+ZARA
+ZERO
+ZIP
+ZM
+ZONE
+ZUERICH
+ZW
+`
+
+var tlds []string = strings.Split(strings.TrimSpace(tldsByAlpha), "\n")
diff --git a/ledger/acctdeltas.go b/ledger/acctdeltas.go
index e587a8fcf..29ec14a05 100644
--- a/ledger/acctdeltas.go
+++ b/ledger/acctdeltas.go
@@ -298,7 +298,7 @@ func (a *compactResourcesDeltas) resourcesLoadOld(tx trackerdb.TransactionScope,
if len(a.misses) == 0 {
return nil
}
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
if err != nil {
return err
}
@@ -317,9 +317,9 @@ func (a *compactResourcesDeltas) resourcesLoadOld(tx trackerdb.TransactionScope,
if delta.oldResource.AcctRef != nil {
acctRef = delta.oldResource.AcctRef
} else if acctRef, ok = knownAddresses[addr]; !ok {
- acctRef, err = arw.LookupAccountRowID(addr)
+ acctRef, err = ar.LookupAccountRowID(addr)
if err != nil {
- if err != sql.ErrNoRows {
+ if err != sql.ErrNoRows && err != trackerdb.ErrNotFound {
err = fmt.Errorf("base account cannot be read while processing resource for addr=%s, aidx=%d: %w", addr.String(), aidx, err)
return err
@@ -330,7 +330,7 @@ func (a *compactResourcesDeltas) resourcesLoadOld(tx trackerdb.TransactionScope,
continue
}
}
- resDataBuf, err = arw.LookupResourceDataByAddrID(acctRef, aidx)
+ resDataBuf, err = ar.LookupResourceDataByAddrID(acctRef, aidx)
switch err {
case nil:
if len(resDataBuf) > 0 {
@@ -344,6 +344,10 @@ func (a *compactResourcesDeltas) resourcesLoadOld(tx trackerdb.TransactionScope,
err = fmt.Errorf("empty resource record: addrid=%d, aidx=%d", acctRef, aidx)
return err
}
+ case trackerdb.ErrNotFound:
+ // we don't have that account, just return an empty record.
+ a.updateOld(missIdx, trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx})
+ err = nil
case sql.ErrNoRows:
// we don't have that account, just return an empty record.
a.updateOld(missIdx, trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx})
@@ -466,7 +470,7 @@ func (a *compactAccountDeltas) accountsLoadOld(tx trackerdb.TransactionScope) (e
if len(a.misses) == 0 {
return nil
}
- arw, err := tx.MakeAccountsReaderWriter()
+ arw, err := tx.MakeAccountsOptimizedReader()
if err != nil {
return err
}
@@ -476,29 +480,13 @@ func (a *compactAccountDeltas) accountsLoadOld(tx trackerdb.TransactionScope) (e
}()
for _, idx := range a.misses {
addr := a.deltas[idx].address
- ref, acctDataBuf, err := arw.LookupAccountDataByAddress(addr)
- switch err {
- case nil:
- if len(acctDataBuf) > 0 {
- persistedAcctData := &trackerdb.PersistedAccountData{Addr: addr, Ref: ref}
- err = protocol.Decode(acctDataBuf, &persistedAcctData.AccountData)
- if err != nil {
- return err
- }
- a.updateOld(idx, *persistedAcctData)
- } else {
- // to retain backward compatibility, we will treat this condition as if we don't have the account.
- a.updateOld(idx, trackerdb.PersistedAccountData{Addr: addr, Ref: ref})
- }
- case sql.ErrNoRows:
- // we don't have that account, just return an empty record.
- a.updateOld(idx, trackerdb.PersistedAccountData{Addr: addr})
- // Note: the err will be ignored in this case since `err` is being shadowed.
- // this behaviour is equivalent to `err = nil`
- default:
+ data, err := arw.LookupAccount(addr)
+ if err != nil {
// unexpected error - let the caller know that we couldn't complete the operation.
return err
}
+ // update the account
+ a.updateOld(idx, data)
}
return
}
@@ -607,7 +595,7 @@ func (a *compactOnlineAccountDeltas) accountsLoadOld(tx trackerdb.TransactionSco
if len(a.misses) == 0 {
return nil
}
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
if err != nil {
return err
}
@@ -616,7 +604,7 @@ func (a *compactOnlineAccountDeltas) accountsLoadOld(tx trackerdb.TransactionSco
}()
for _, idx := range a.misses {
addr := a.deltas[idx].address
- ref, acctDataBuf, err := arw.LookupOnlineAccountDataByAddress(addr)
+ ref, acctDataBuf, err := ar.LookupOnlineAccountDataByAddress(addr)
switch err {
case nil:
if len(acctDataBuf) > 0 {
@@ -630,6 +618,10 @@ func (a *compactOnlineAccountDeltas) accountsLoadOld(tx trackerdb.TransactionSco
// empty data means offline account
a.updateOld(idx, trackerdb.PersistedOnlineAccountData{Addr: addr, Ref: ref})
}
+ case trackerdb.ErrNotFound:
+ // we don't have that account, just return an empty record.
+ a.updateOld(idx, trackerdb.PersistedOnlineAccountData{Addr: addr})
+ // TODO: phase out sql.ErrNoRows
case sql.ErrNoRows:
// we don't have that account, just return an empty record.
a.updateOld(idx, trackerdb.PersistedOnlineAccountData{Addr: addr})
diff --git a/ledger/acctdeltas_test.go b/ledger/acctdeltas_test.go
index 2dc748225..cb259ea18 100644
--- a/ledger/acctdeltas_test.go
+++ b/ledger/acctdeltas_test.go
@@ -52,14 +52,14 @@ import (
)
func checkAccounts(t *testing.T, tx trackerdb.TransactionScope, rnd basics.Round, accts map[basics.Address]basics.AccountData) {
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
require.NoError(t, err)
- r, err := arw.AccountsRound()
+ r, err := ar.AccountsRound()
require.NoError(t, err)
require.Equal(t, r, rnd)
- aor, err := tx.Testing().MakeAccountsOptimizedReader()
+ aor, err := tx.MakeAccountsOptimizedReader()
require.NoError(t, err)
var totalOnline, totalOffline, totalNotPart uint64
@@ -83,11 +83,11 @@ func checkAccounts(t *testing.T, tx trackerdb.TransactionScope, rnd basics.Round
}
}
- all, err := arw.Testing().AccountsAllTest()
+ all, err := ar.Testing().AccountsAllTest()
require.NoError(t, err)
require.Equal(t, all, accts)
- totals, err := arw.AccountsTotals(context.Background(), false)
+ totals, err := ar.AccountsTotals(context.Background(), false)
require.NoError(t, err)
require.Equal(t, totalOnline, totals.Online.Money.Raw, "mismatching total online money")
require.Equal(t, totalOffline, totals.Offline.Money.Raw)
@@ -129,7 +129,7 @@ func checkAccounts(t *testing.T, tx trackerdb.TransactionScope, rnd basics.Round
})
for i := 0; i < len(onlineAccounts); i++ {
- dbtop, err := arw.AccountsOnlineTop(rnd, 0, uint64(i), proto)
+ dbtop, err := ar.AccountsOnlineTop(rnd, 0, uint64(i), proto)
require.NoError(t, err)
require.Equal(t, i, len(dbtop))
@@ -139,7 +139,7 @@ func checkAccounts(t *testing.T, tx trackerdb.TransactionScope, rnd basics.Round
}
}
- top, err := arw.AccountsOnlineTop(rnd, 0, uint64(len(onlineAccounts)+1), proto)
+ top, err := ar.AccountsOnlineTop(rnd, 0, uint64(len(onlineAccounts)+1), proto)
require.NoError(t, err)
require.Equal(t, len(top), len(onlineAccounts))
}
@@ -149,8 +149,7 @@ func TestAccountDBInit(t *testing.T) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true)
- dbs.SetLogger(logging.TestingLog(t))
+ dbs, _ := sqlitedriver.OpenForTesting(t, true)
defer dbs.Close()
err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
@@ -210,20 +209,21 @@ func TestAccountDBRound(t *testing.T) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true)
- dbs.SetLogger(logging.TestingLog(t))
+ dbs, _ := sqlitedriver.OpenForTesting(t, true)
defer dbs.Close()
dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
+ require.NoError(t, err)
+ aw, err := tx.MakeAccountsWriter()
require.NoError(t, err)
accts := ledgertesting.RandomAccounts(20, true)
tx.Testing().AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion)
checkAccounts(t, tx, 0, accts)
- totals, err := arw.AccountsTotals(context.Background(), false)
+ totals, err := ar.AccountsTotals(context.Background(), false)
require.NoError(t, err)
- expectedOnlineRoundParams, endRound, err := arw.AccountsOnlineRoundParams()
+ expectedOnlineRoundParams, endRound, err := ar.AccountsOnlineRoundParams()
require.NoError(t, err)
require.Equal(t, 1, len(expectedOnlineRoundParams))
require.Equal(t, 0, int(endRound))
@@ -269,10 +269,10 @@ func TestAccountDBRound(t *testing.T) {
err = resourceUpdatesCnt.resourcesLoadOld(tx, knownAddresses)
require.NoError(t, err)
- err = arw.AccountsPutTotals(totals, false)
+ err = aw.AccountsPutTotals(totals, false)
require.NoError(t, err)
onlineRoundParams := ledgercore.OnlineRoundParamsData{RewardsLevel: totals.RewardsLevel, OnlineSupply: totals.Online.Money.Raw, CurrentProtocol: protocol.ConsensusCurrentVersion}
- err = arw.AccountsPutOnlineRoundParams([]ledgercore.OnlineRoundParamsData{onlineRoundParams}, basics.Round(i))
+ err = aw.AccountsPutOnlineRoundParams([]ledgercore.OnlineRoundParamsData{onlineRoundParams}, basics.Round(i))
require.NoError(t, err)
expectedOnlineRoundParams = append(expectedOnlineRoundParams, onlineRoundParams)
@@ -289,7 +289,7 @@ func TestAccountDBRound(t *testing.T) {
updatedOnlineAccts, err := onlineAccountsNewRound(tx, updatesOnlineCnt, proto, basics.Round(i))
require.NoError(t, err)
- err = arw.UpdateAccountsRound(basics.Round(i))
+ err = aw.UpdateAccountsRound(basics.Round(i))
require.NoError(t, err)
// TODO: calculate exact number of updates?
@@ -297,7 +297,7 @@ func TestAccountDBRound(t *testing.T) {
require.NotEmpty(t, updatedOnlineAccts)
checkAccounts(t, tx, basics.Round(i), accts)
- arw.Testing().CheckCreatablesTest(t, i, expectedDbImage)
+ ar.Testing().CheckCreatablesTest(t, i, expectedDbImage)
}
// test the accounts totals
@@ -307,11 +307,11 @@ func TestAccountDBRound(t *testing.T) {
}
expectedTotals := ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, nil, ledgercore.AccountTotals{})
- actualTotals, err := arw.AccountsTotals(context.Background(), false)
+ actualTotals, err := ar.AccountsTotals(context.Background(), false)
require.NoError(t, err)
require.Equal(t, expectedTotals, actualTotals)
- actualOnlineRoundParams, endRound, err := arw.AccountsOnlineRoundParams()
+ actualOnlineRoundParams, endRound, err := ar.AccountsOnlineRoundParams()
require.NoError(t, err)
require.Equal(t, expectedOnlineRoundParams, actualOnlineRoundParams)
require.Equal(t, 9, int(endRound))
@@ -321,7 +321,7 @@ func TestAccountDBRound(t *testing.T) {
acctCb := func(addr basics.Address, data basics.AccountData) {
loaded[addr] = data
}
- count, err := arw.LoadAllFullAccounts(context.Background(), "accountbase", "resources", acctCb)
+ count, err := ar.LoadAllFullAccounts(context.Background(), "accountbase", "resources", acctCb)
require.NoError(t, err)
require.Equal(t, count, len(accts))
require.Equal(t, count, len(loaded))
@@ -366,8 +366,7 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
for i, test := range tests {
- dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true)
- dbs.SetLogger(logging.TestingLog(t))
+ dbs, _ := sqlitedriver.OpenForTesting(t, true)
defer dbs.Close()
dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
@@ -437,8 +436,7 @@ func TestAccountDBInMemoryAcct(t *testing.T) {
func TestAccountStorageWithStateProofID(t *testing.T) {
partitiontest.PartitionTest(t)
- dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true)
- dbs.SetLogger(logging.TestingLog(t))
+ dbs, _ := sqlitedriver.OpenForTesting(t, true)
defer dbs.Close()
dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
@@ -604,29 +602,21 @@ func benchmarkInitBalances(b testing.TB, numAccounts int, tx trackerdb.Transacti
return
}
-func cleanupTestDb(dbs db.Pair, dbName string, inMemory bool) {
- dbs.Close()
- if !inMemory {
- os.Remove(dbName)
- }
-}
-
func benchmarkReadingAllBalances(b *testing.B, inMemory bool) {
- dbs, _ := sqlitedriver.DbOpenTrackerTest(b, true)
- dbs.SetLogger(logging.TestingLog(b))
+ dbs, _ := sqlitedriver.OpenForTesting(b, true)
defer dbs.Close()
bal := make(map[basics.Address]basics.AccountData)
err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
benchmarkInitBalances(b, b.N, tx, protocol.ConsensusCurrentVersion)
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
if err != nil {
return err
}
b.ResetTimer()
// read all the balances in the database.
var err2 error
- bal, err2 = arw.Testing().AccountsAllTest()
+ bal, err2 = ar.Testing().AccountsAllTest()
require.NoError(b, err2)
return nil
})
@@ -648,9 +638,8 @@ func BenchmarkReadingAllBalancesDisk(b *testing.B) {
}
func benchmarkReadingRandomBalances(b *testing.B, inMemory bool) {
- dbs, fn := sqlitedriver.DbOpenTrackerTest(b, true)
- dbs.SetLogger(logging.TestingLog(b))
- defer dbs.CleanupTest(fn, inMemory)
+ dbs, _ := sqlitedriver.OpenForTesting(b, true)
+ defer dbs.Close()
err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
accounts := benchmarkInitBalances(b, b.N, tx, protocol.ConsensusCurrentVersion)
@@ -687,30 +676,6 @@ func BenchmarkReadingRandomBalancesDisk(b *testing.B) {
benchmarkReadingRandomBalances(b, false)
}
-// TestAccountsDbQueriesCreateClose tests to see that we can create the accountsDbQueries and close it.
-// it also verify that double-closing it doesn't create an issue.
-func TestAccountsDbQueriesCreateClose(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- dbs, _ := storetesting.DbOpenTest(t, true)
- storetesting.SetDbLogging(t, dbs)
- defer dbs.Close()
-
- err := dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- sqlitedriver.AccountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion)
- return nil
- })
- require.NoError(t, err)
- qs, err := sqlitedriver.AccountsInitDbQueries(dbs.Rdb.Handle)
- require.NoError(t, err)
- // TODO[store-refactor]: internals are opaque, once we move the the remainder of accountdb we can mvoe this too
- // require.NotNil(t, qs.listCreatablesStmt)
- qs.Close()
- // require.Nil(t, qs.listCreatablesStmt)
- qs.Close()
- // require.Nil(t, qs.listCreatablesStmt)
-}
-
func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder bool) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
genesisInitState, _ := ledgertesting.GenerateInitState(b, protocol.ConsensusCurrentVersion, 100)
@@ -771,7 +736,7 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo
normalizedAccountBalances, err := prepareNormalizedBalancesV6(chunk.Balances, proto)
require.NoError(b, err)
b.StartTimer()
- err = l.trackerDBs.Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) {
+ err = l.trackerDBs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
cw, err := tx.MakeCatchpointWriter()
if err != nil {
return err
@@ -793,6 +758,7 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo
b.ReportMetric(float64(b.N)/float64((time.Since(accountsWritingStarted)-accountsGenerationDuration).Seconds()), "accounts/sec")
}
+//nolint:staticcheck // intentionally setting b.N
func BenchmarkWriteCatchpointStagingBalances(b *testing.B) {
benchSizes := []int{1024 * 100, 1024 * 200, 1024 * 400}
for _, size := range benchSizes {
@@ -813,9 +779,8 @@ func TestLookupKeysByPrefix(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- dbs, fn := sqlitedriver.DbOpenTrackerTest(t, false)
- dbs.SetLogger(logging.TestingLog(t))
- defer dbs.CleanupTest(fn, false)
+ dbs, _ := sqlitedriver.OpenForTesting(t, false)
+ defer dbs.Close()
err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
// return account data, initialize DB tables from AccountsInitTest
@@ -1000,9 +965,8 @@ func TestLookupKeysByPrefix(t *testing.T) {
func BenchmarkLookupKeyByPrefix(b *testing.B) {
// learn something from BenchmarkWritingRandomBalancesDisk
- dbs, fn := sqlitedriver.DbOpenTrackerTest(b, false)
- dbs.SetLogger(logging.TestingLog(b))
- defer dbs.CleanupTest(fn, false)
+ dbs, _ := sqlitedriver.OpenForTesting(b, false)
+ defer dbs.Close()
err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
// return account data, initialize DB tables from AccountsInitTest
@@ -1131,10 +1095,10 @@ func TestKVStoreNilBlobConversion(t *testing.T) {
err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err0 error) {
writer, err0 := sqlitedriver.MakeAccountsSQLWriter(tx, false, false, true, false)
- defer writer.Close()
if err0 != nil {
return
}
+ defer writer.Close()
for i := 0; i < len(kvPairDBPrepareSet); i++ {
err0 = writer.UpsertKvPair(string(kvPairDBPrepareSet[i].key), nil)
if err0 != nil {
@@ -1187,11 +1151,8 @@ func TestKVStoreNilBlobConversion(t *testing.T) {
// | Section 4: Run migration to see replace nils with empty byte slices |
// +---------------------------------------------------------------------+
- trackerDBWrapper := sqlitedriver.CreateTrackerSQLStore(dbs)
- err = trackerDBWrapper.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err0 error) {
- _, err0 = tx.RunMigrations(ctx, trackerdb.Params{}, log, targetVersion)
- return
- })
+ trackerDBWrapper := sqlitedriver.MakeStore(dbs)
+ _, err = trackerDBWrapper.RunMigrations(context.Background(), trackerdb.Params{}, log, targetVersion)
require.NoError(t, err)
// +------------------------------------------------------------------------------------------------+
@@ -1420,8 +1381,7 @@ func TestCompactResourceDeltas(t *testing.T) {
func TestLookupAccountAddressFromAddressID(t *testing.T) {
partitiontest.PartitionTest(t)
- dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true)
- dbs.SetLogger(logging.TestingLog(t))
+ dbs, _ := sqlitedriver.OpenForTesting(t, true)
defer dbs.Close()
addrs := make([]basics.Address, 100)
@@ -1449,13 +1409,13 @@ func TestLookupAccountAddressFromAddressID(t *testing.T) {
require.NoError(t, err)
err = dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
if err != nil {
return err
}
for addr, addrid := range addrsids {
- retAddr, err := arw.LookupAccountAddressFromAddressID(ctx, addrid)
+ retAddr, err := ar.LookupAccountAddressFromAddressID(ctx, addrid)
if err != nil {
return err
}
@@ -1464,7 +1424,7 @@ func TestLookupAccountAddressFromAddressID(t *testing.T) {
}
}
// test fail case:
- retAddr, err := arw.LookupAccountAddressFromAddressID(ctx, nil)
+ retAddr, err := ar.LookupAccountAddressFromAddressID(ctx, nil)
if !errors.Is(err, sql.ErrNoRows) {
return fmt.Errorf("unexpected error : %w", err)
@@ -2093,10 +2053,9 @@ func initBoxDatabase(b *testing.B, totalBoxes, boxSize int) (db.Pair, func(), er
}
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- dbs, fn := storetesting.DbOpenTest(b, false)
- storetesting.SetDbLogging(b, dbs)
+ dbs, _ := storetesting.DbOpenTest(b, false)
cleanup := func() {
- cleanupTestDb(dbs, fn, false)
+ dbs.Close()
}
tx, err := dbs.Wdb.Handle.Begin()
@@ -2232,20 +2191,20 @@ func TestAccountOnlineQueries(t *testing.T) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true)
- dbs.SetLogger(logging.TestingLog(t))
+ dbs, _ := sqlitedriver.OpenForTesting(t, true)
defer dbs.Close()
err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
- arw, err := tx.MakeAccountsReaderWriter()
- if err != nil {
- return err
- }
+ ar, err := tx.MakeAccountsReader()
+ require.NoError(t, err)
+
+ aw, err := tx.MakeAccountsWriter()
+ require.NoError(t, err)
var accts map[basics.Address]basics.AccountData
tx.Testing().AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion)
- totals, err := arw.AccountsTotals(context.Background(), false)
+ totals, err := ar.AccountsTotals(context.Background(), false)
require.NoError(t, err)
var baseAccounts lruAccounts
@@ -2329,7 +2288,7 @@ func TestAccountOnlineQueries(t *testing.T) {
err = updatesOnlineCnt.accountsLoadOld(tx)
require.NoError(t, err)
- err = arw.AccountsPutTotals(totals, false)
+ err = aw.AccountsPutTotals(totals, false)
require.NoError(t, err)
updatedAccts, _, _, err := accountsNewRound(tx, updatesCnt, compactResourcesDeltas{}, nil, nil, proto, rnd)
require.NoError(t, err)
@@ -2339,7 +2298,7 @@ func TestAccountOnlineQueries(t *testing.T) {
require.NoError(t, err)
require.NotEmpty(t, updatedOnlineAccts)
- err = arw.UpdateAccountsRound(rnd)
+ err = aw.UpdateAccountsRound(rnd)
require.NoError(t, err)
return
@@ -2367,12 +2326,12 @@ func TestAccountOnlineQueries(t *testing.T) {
refoaB3 := round3poads[0].Ref
refoaC3 := round3poads[1].Ref
- queries, err := tx.Testing().MakeOnlineAccountsOptimizedReader()
+ queries, err := tx.MakeOnlineAccountsOptimizedReader()
require.NoError(t, err)
// check round 1
rnd := basics.Round(1)
- online, err := arw.AccountsOnlineTop(rnd, 0, 10, proto)
+ online, err := ar.AccountsOnlineTop(rnd, 0, 10, proto)
require.NoError(t, err)
require.Equal(t, 2, len(online))
require.NotContains(t, online, addrC)
@@ -2411,7 +2370,7 @@ func TestAccountOnlineQueries(t *testing.T) {
// check round 2
rnd = basics.Round(2)
- online, err = arw.AccountsOnlineTop(rnd, 0, 10, proto)
+ online, err = ar.AccountsOnlineTop(rnd, 0, 10, proto)
require.NoError(t, err)
require.Equal(t, 1, len(online))
require.NotContains(t, online, addrA)
@@ -2444,7 +2403,7 @@ func TestAccountOnlineQueries(t *testing.T) {
// check round 3
rnd = basics.Round(3)
- online, err = arw.AccountsOnlineTop(rnd, 0, 10, proto)
+ online, err = ar.AccountsOnlineTop(rnd, 0, 10, proto)
require.NoError(t, err)
require.Equal(t, 1, len(online))
require.NotContains(t, online, addrA)
@@ -2475,7 +2434,7 @@ func TestAccountOnlineQueries(t *testing.T) {
require.Equal(t, dataC3.AccountBaseData.MicroAlgos, paod.AccountData.MicroAlgos)
require.Equal(t, voteIDC, paod.AccountData.VoteID)
- paods, err := arw.OnlineAccountsAll(0)
+ paods, err := ar.OnlineAccountsAll(0)
require.NoError(t, err)
require.Equal(t, 5, len(paods))
@@ -2512,20 +2471,20 @@ func TestAccountOnlineQueries(t *testing.T) {
checkAddrC()
checkAddrA()
- paods, err = arw.OnlineAccountsAll(3)
+ paods, err = ar.OnlineAccountsAll(3)
require.NoError(t, err)
require.Equal(t, 5, len(paods))
checkAddrB()
checkAddrC()
checkAddrA()
- paods, err = arw.OnlineAccountsAll(2)
+ paods, err = ar.OnlineAccountsAll(2)
require.NoError(t, err)
require.Equal(t, 3, len(paods))
checkAddrB()
checkAddrC()
- paods, err = arw.OnlineAccountsAll(1)
+ paods, err = ar.OnlineAccountsAll(1)
require.NoError(t, err)
require.Equal(t, 2, len(paods))
checkAddrB()
diff --git a/ledger/acctonline.go b/ledger/acctonline.go
index a9834d61c..c73dfa010 100644
--- a/ledger/acctonline.go
+++ b/ledger/acctonline.go
@@ -60,7 +60,7 @@ type cachedOnlineAccount struct {
// onlineAccounts tracks history of online accounts
type onlineAccounts struct {
// Connection to the database.
- dbs trackerdb.TrackerStore
+ dbs trackerdb.Store
// Prepared SQL statements for fast accounts DB lookups.
accountsq trackerdb.OnlineAccountsReader
@@ -154,9 +154,9 @@ func (ao *onlineAccounts) initializeFromDisk(l ledgerForTracker, lastBalancesRou
ao.log = l.trackerLog()
err = ao.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error {
- ar, err := tx.MakeAccountsReader()
- if err != nil {
- return err
+ ar, makeErr := tx.MakeAccountsReader()
+ if makeErr != nil {
+ return makeErr
}
var err0 error
@@ -205,13 +205,15 @@ func (ao *onlineAccounts) latest() basics.Round {
// close closes the accountUpdates, waiting for all the child go-routine to complete
func (ao *onlineAccounts) close() {
+ // ao.voters' loadTree might use ao.accountsq if looking up DB
+ // so it must be closed before ao.accountsq
+ ao.voters.close()
+
if ao.accountsq != nil {
ao.accountsq.Close()
ao.accountsq = nil
}
- ao.voters.close()
-
ao.baseOnlineAccounts.prune(0)
}
@@ -433,23 +435,23 @@ func (ao *onlineAccounts) commitRound(ctx context.Context, tx trackerdb.Transact
return err
}
- arw, err := tx.MakeAccountsReaderWriter()
+ aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
}
- err = arw.OnlineAccountsDelete(dcc.onlineAccountsForgetBefore)
+ err = aw.OnlineAccountsDelete(dcc.onlineAccountsForgetBefore)
if err != nil {
return err
}
- err = arw.AccountsPutOnlineRoundParams(dcc.onlineRoundParams, dcc.oldBase+1)
+ err = aw.AccountsPutOnlineRoundParams(dcc.onlineRoundParams, dcc.oldBase+1)
if err != nil {
return err
}
// delete all entries all older than maxBalLookback (or votersLookback) rounds ago
- err = arw.AccountsPruneOnlineRoundParams(dcc.onlineAccountsForgetBefore)
+ err = aw.AccountsPruneOnlineRoundParams(dcc.onlineAccountsForgetBefore)
return
}
@@ -556,6 +558,26 @@ func (ao *onlineAccounts) onlineCirculation(rnd basics.Round, voteRnd basics.Rou
return totalStake, nil
}
+// roundsParamsEx return the round params for the given round for extended rounds range
+// by looking into DB as needed
+// locking semantics: requires accountsMu.RLock()
+func (ao *onlineAccounts) roundsParamsEx(rnd basics.Round) (ledgercore.OnlineRoundParamsData, error) {
+ paramsOffset, err := ao.roundParamsOffset(rnd)
+ if err == nil {
+ return ao.onlineRoundParamsData[paramsOffset], nil
+ }
+ var roundOffsetError *RoundOffsetError
+ if !errors.As(err, &roundOffsetError) {
+ return ledgercore.OnlineRoundParamsData{}, err
+ }
+
+ roundParams, err := ao.accountsq.LookupOnlineRoundParams(rnd)
+ if err != nil {
+ return ledgercore.OnlineRoundParamsData{}, err
+ }
+ return roundParams, nil
+}
+
// onlineTotalsEx return the total online balance for the given round for extended rounds range
// by looking into DB
func (ao *onlineAccounts) onlineTotalsEx(rnd basics.Round) (basics.MicroAlgos, error) {
@@ -569,8 +591,12 @@ func (ao *onlineAccounts) onlineTotalsEx(rnd basics.Round) (basics.MicroAlgos, e
ao.log.Errorf("onlineTotals error: %v", err)
}
- totalsOnline, err = ao.accountsq.LookupOnlineTotalsHistory(rnd)
- return totalsOnline, err
+ roundParams, err := ao.accountsq.LookupOnlineRoundParams(rnd)
+ if err != nil {
+ return basics.MicroAlgos{}, err
+ }
+ totalsOnline = basics.MicroAlgos{Raw: roundParams.OnlineSupply}
+ return totalsOnline, nil
}
// onlineTotals returns the online totals of all accounts at the end of round rnd.
@@ -1002,13 +1028,13 @@ func (ao *onlineAccounts) onlineAcctsExpiredByRound(rnd, voteRnd basics.Round) (
// roundOffsetError was returned, so the round number cannot be found in deltas, it is in history.
// This means offset will be 0 and ao.deltas[:offset] will be an empty slice.
}
- paramsOffset, err := ao.roundParamsOffset(rnd)
+
+ roundParams, err := ao.roundsParamsEx(rnd)
if err != nil {
return nil, err
}
-
- rewardsParams := config.Consensus[ao.onlineRoundParamsData[paramsOffset].CurrentProtocol]
- rewardsLevel := ao.onlineRoundParamsData[paramsOffset].RewardsLevel
+ rewardsParams := config.Consensus[roundParams.CurrentProtocol]
+ rewardsLevel := roundParams.RewardsLevel
// Step 1: get all online accounts from DB for rnd
// Not unlocking ao.accountsMu yet, to stay consistent with Step 2
diff --git a/ledger/acctonline_expired_test.go b/ledger/acctonline_expired_test.go
index 0645d0dd7..bfc72fa98 100644
--- a/ledger/acctonline_expired_test.go
+++ b/ledger/acctonline_expired_test.go
@@ -71,6 +71,7 @@ type onlineAcctModelAcct struct {
func newMapOnlineAcctModel(t *testing.T) *mapOnlineAcctModel {
return &mapOnlineAcctModel{
t: t,
+ cur: 1,
accts: make(map[basics.Address]map[basics.Round]onlineAcctModelAcct),
expiring: make(map[basics.Round]map[basics.Address]struct{}),
}
@@ -94,7 +95,7 @@ func (m *mapOnlineAcctModel) lookupAcctAsOf(rnd basics.Round, addr basics.Addres
return onlineAcctModelAcct{}
}
// find the acct record for the most recent round <= rnd
- for r := rnd; r >= 0; r-- {
+ for r := rnd; r > 0; r-- {
if acct, ok := acctRounds[r]; ok {
return acct
}
@@ -113,7 +114,7 @@ func (m *mapOnlineAcctModel) allOnlineAsOf(rnd basics.Round) map[basics.Address]
accts := make(map[basics.Address]onlineAcctModelAcct)
for addr, acctRounds := range m.accts {
// find the acct record for the most recent round <= rnd
- for r := rnd; r >= 0; r-- {
+ for r := rnd; r > 0; r-- {
if acct, ok := acctRounds[r]; ok {
if acct.Status == basics.Online {
accts[addr] = acct
diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go
index 0bee1f35f..a8f7a8fca 100644
--- a/ledger/acctonline_test.go
+++ b/ledger/acctonline_test.go
@@ -18,7 +18,6 @@ package ledger
import (
"context"
- "database/sql"
"fmt"
"sort"
"strconv"
@@ -80,7 +79,7 @@ func commitSyncPartial(t *testing.T, oa *onlineAccounts, ml *mockLedgerForTracke
require.NoError(t, err)
}
err := ml.trackers.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
- arw, err := tx.MakeAccountsReaderWriter()
+ aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
}
@@ -92,7 +91,7 @@ func commitSyncPartial(t *testing.T, oa *onlineAccounts, ml *mockLedgerForTracke
}
}
- return arw.UpdateAccountsRound(newBase)
+ return aw.UpdateAccountsRound(newBase)
})
require.NoError(t, err)
}()
@@ -115,8 +114,7 @@ func commitSyncPartialComplete(t *testing.T, oa *onlineAccounts, ml *mockLedgerF
}
}
-func newBlock(t *testing.T, ml *mockLedgerForTracker, testProtocolVersion protocol.ConsensusVersion, protoParams config.ConsensusParams, rnd basics.Round, base map[basics.Address]basics.AccountData, updates ledgercore.AccountDeltas, prevTotals ledgercore.AccountTotals) (newTotals ledgercore.AccountTotals) {
- rewardLevel := uint64(0)
+func newBlockWithRewards(t *testing.T, ml *mockLedgerForTracker, testProtocolVersion protocol.ConsensusVersion, protoParams config.ConsensusParams, rnd basics.Round, base map[basics.Address]basics.AccountData, updates ledgercore.AccountDeltas, rewardLevel uint64, prevTotals ledgercore.AccountTotals) (newTotals ledgercore.AccountTotals) {
newTotals = ledgertesting.CalculateNewRoundAccountTotals(t, updates, rewardLevel, protoParams, base, prevTotals)
blk := bookkeeping.Block{
@@ -135,6 +133,10 @@ func newBlock(t *testing.T, ml *mockLedgerForTracker, testProtocolVersion protoc
return newTotals
}
+func newBlock(t *testing.T, ml *mockLedgerForTracker, testProtocolVersion protocol.ConsensusVersion, protoParams config.ConsensusParams, rnd basics.Round, base map[basics.Address]basics.AccountData, updates ledgercore.AccountDeltas, prevTotals ledgercore.AccountTotals) (newTotals ledgercore.AccountTotals) {
+ return newBlockWithRewards(t, ml, testProtocolVersion, protoParams, rnd, base, updates, 0, prevTotals)
+}
+
// TestAcctOnline checks the online accounts tracker correctly stores accont change history
// 1. Start with 1000 online accounts
// 2. Every round set one of them offline
@@ -1316,7 +1318,7 @@ func TestAcctOnlineVotersLongerHistory(t *testing.T) {
require.NoError(t, err)
_, err = oa.onlineTotalsEx(lowest - 1)
- require.ErrorIs(t, err, sql.ErrNoRows)
+ require.ErrorIs(t, err, trackerdb.ErrNotFound)
// ensure the cache size for addrA does not have more entries than maxBalLookback + 1
// +1 comes from the deletion before X without checking account state at X
@@ -2179,3 +2181,114 @@ func TestAcctOnline_ExpiredOnlineCirculation(t *testing.T) {
}
}
}
+
+// TestAcctOnline_OnlineAcctsExpiredByRound ensures that onlineAcctsExpiredByRound
+// can retrieve data from DB even if trackersDB flushed and the requested round is in
+// extended history controlled by voters' lowest round.
+// The test uses non-empty rewards in order to ensure onlineAcctsExpiredByRound internally fetches
+// actual non-empty rewards data from DB.
+func TestAcctOnline_OnlineAcctsExpiredByRound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const seedLookback = 2
+ const seedInteval = 3
+ const maxBalLookback = 2 * seedLookback * seedInteval
+
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-OnlineAcctsExpiredByRound")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.MaxBalLookback = maxBalLookback
+ protoParams.SeedLookback = seedLookback
+ protoParams.SeedRefreshInterval = seedInteval
+ protoParams.StateProofInterval = 16
+ protoParams.RewardsRateRefreshInterval = 10
+ config.Consensus[testProtocolVersion] = protoParams
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ maxRound := 5*basics.Round(protoParams.StateProofInterval) + 1
+ targetRound := basics.Round(protoParams.StateProofInterval * 2)
+
+ const numAccts = 20
+ allAccts := make([]basics.BalanceRecord, numAccts)
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+ numExpiredAccts := 5
+ totalExpiredStake := basics.MicroAlgos{Raw: 0}
+ for i := 0; i < numAccts; i++ {
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: ledgertesting.RandomOnlineAccountData(0),
+ }
+ // make some accounts to expire before the targetRound
+ if i < numExpiredAccts {
+ allAccts[i].AccountData.VoteLastValid = targetRound - 1
+ totalExpiredStake.Raw += allAccts[i].MicroAlgos.Raw
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ }
+
+ addSinkAndPoolAccounts(genesisAccts)
+
+ ml := makeMockLedgerForTracker(t, true, 1, testProtocolVersion, genesisAccts)
+ defer ml.Close()
+ conf := config.GetDefaultLocal()
+ conf.MaxAcctLookback = maxBalLookback
+
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
+ accounts := genesisAccts
+ var updates ledgercore.AccountDeltas
+ base := accounts[0]
+
+ // add some blocks to cover few stateproof periods
+ for i := basics.Round(1); i <= maxRound; i++ {
+ newAccts := applyPartialDeltas(base, updates)
+ accounts = append(accounts, newAccts)
+ totals = newBlockWithRewards(t, ml, testProtocolVersion, protoParams, i, base, updates, uint64(i), totals)
+ base = newAccts
+ }
+
+ // ensure voters kicked in
+ require.Greater(t, len(oa.voters.votersForRoundCache), 1)
+ lowestRound := oa.voters.lowestRound(maxRound)
+ require.Equal(t, basics.Round(protoParams.StateProofInterval), lowestRound)
+
+ // commit max possible number of rounds
+ commitSync(t, oa, ml, maxRound)
+ // check voters did not allow to remove online accounts and params data after commit
+ require.Equal(t, lowestRound, oa.voters.lowestRound(maxRound))
+
+ // check the stateproof interval 2 not in deltas
+ offset, err := oa.roundOffset(targetRound)
+ require.Error(t, err)
+ var roundOffsetError *RoundOffsetError
+ require.ErrorAs(t, err, &roundOffsetError)
+ require.Zero(t, offset)
+
+ offset, err = oa.roundParamsOffset(targetRound)
+ require.Error(t, err)
+ require.ErrorAs(t, err, &roundOffsetError)
+ require.Zero(t, offset)
+
+ // but the DB has data
+ roundParamsData, err := oa.accountsq.LookupOnlineRoundParams(targetRound)
+ require.NoError(t, err)
+ require.NotEmpty(t, roundParamsData)
+
+ // but still available for lookup via onlineAcctsExpiredByRound
+ expAccts, err := oa.onlineAcctsExpiredByRound(targetRound, targetRound+10)
+ require.NoError(t, err)
+ require.Len(t, expAccts, numExpiredAccts)
+
+ var expiredStake basics.MicroAlgos
+ for _, expAcct := range expAccts {
+ expiredStake.Raw += expAcct.MicroAlgosWithRewards.Raw
+ }
+
+ // ensure onlineAcctsExpiredByRound fetched proto and rewards level and it recalculated
+ require.Greater(t, expiredStake.Raw, totalExpiredStake.Raw)
+}
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index 5640800ce..bb495f3f8 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -151,7 +151,7 @@ type modifiedKvValue struct {
type accountUpdates struct {
// Connection to the database.
- dbs trackerdb.TrackerStore
+ dbs trackerdb.Store
// Optimized reader for fast accounts DB lookups.
accountsq trackerdb.AccountsReader
@@ -586,100 +586,6 @@ func (au *accountUpdates) LookupWithoutRewards(rnd basics.Round, addr basics.Add
return
}
-// ListAssets lists the assets by their asset index, limiting to the first maxResults
-func (au *accountUpdates) ListAssets(maxAssetIdx basics.AssetIndex, maxResults uint64) ([]basics.CreatableLocator, error) {
- return au.listCreatables(basics.CreatableIndex(maxAssetIdx), maxResults, basics.AssetCreatable)
-}
-
-// ListApplications lists the application by their app index, limiting to the first maxResults
-func (au *accountUpdates) ListApplications(maxAppIdx basics.AppIndex, maxResults uint64) ([]basics.CreatableLocator, error) {
- return au.listCreatables(basics.CreatableIndex(maxAppIdx), maxResults, basics.AppCreatable)
-}
-
-// listCreatables lists the application/asset by their app/asset index, limiting to the first maxResults
-func (au *accountUpdates) listCreatables(maxCreatableIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) ([]basics.CreatableLocator, error) {
- au.accountsMu.RLock()
- for {
- currentDbRound := au.cachedDBRound
- currentDeltaLen := len(au.deltas)
- // Sort indices for creatables that have been created/deleted. If this
- // turns out to be too inefficient, we could keep around a heap of
- // created/deleted asset indices in memory.
- keys := make([]basics.CreatableIndex, 0, len(au.creatables))
- for cidx, delta := range au.creatables {
- if delta.Ctype != ctype {
- continue
- }
- if cidx <= maxCreatableIdx {
- keys = append(keys, cidx)
- }
- }
- sort.Slice(keys, func(i, j int) bool { return keys[i] > keys[j] })
-
- // Check for creatables that haven't been synced to disk yet.
- unsyncedCreatables := make([]basics.CreatableLocator, 0, len(keys))
- deletedCreatables := make(map[basics.CreatableIndex]bool, len(keys))
- for _, cidx := range keys {
- delta := au.creatables[cidx]
- if delta.Created {
- // Created but only exists in memory
- unsyncedCreatables = append(unsyncedCreatables, basics.CreatableLocator{
- Type: delta.Ctype,
- Index: cidx,
- Creator: delta.Creator,
- })
- } else {
- // Mark deleted creatables for exclusion from the results set
- deletedCreatables[cidx] = true
- }
- }
-
- au.accountsMu.RUnlock()
-
- // Check in-memory created creatables, which will always be newer than anything
- // in the database
- if uint64(len(unsyncedCreatables)) >= maxResults {
- return unsyncedCreatables[:maxResults], nil
- }
- res := unsyncedCreatables
-
- // Fetch up to maxResults - len(res) + len(deletedCreatables) from the database,
- // so we have enough extras in case creatables were deleted
- numToFetch := maxResults - uint64(len(res)) + uint64(len(deletedCreatables))
- dbResults, dbRound, err := au.accountsq.ListCreatables(maxCreatableIdx, numToFetch, ctype)
- if err != nil {
- return nil, err
- }
-
- if dbRound == currentDbRound {
- // Now we merge the database results with the in-memory results
- for _, loc := range dbResults {
- // Check if we have enough results
- if uint64(len(res)) == maxResults {
- return res, nil
- }
-
- // Creatable was deleted
- if _, ok := deletedCreatables[loc.Index]; ok {
- continue
- }
-
- // We're OK to include this result
- res = append(res, loc)
- }
- return res, nil
- }
- if dbRound < currentDbRound {
- au.log.Errorf("listCreatables: database round %d is behind in-memory round %d", dbRound, currentDbRound)
- return []basics.CreatableLocator{}, &StaleDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDbRound}
- }
- au.accountsMu.RLock()
- for currentDbRound >= au.cachedDBRound && currentDeltaLen == len(au.deltas) {
- au.accountsReadCond.Wait()
- }
- }
-}
-
// GetCreatorForRound returns the creator for a given asset/app index at a given round
func (au *accountUpdates) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
return au.getCreatorForRound(rnd, cidx, ctype, true /* take the lock */)
@@ -914,17 +820,17 @@ func (au *accountUpdates) latestTotalsImpl() (basics.Round, ledgercore.AccountTo
// initializeFromDisk performs the atomic operation of loading the accounts data information from disk
// and preparing the accountUpdates for operation.
-func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRound basics.Round) (err error) {
+func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRound basics.Round) error {
au.dbs = l.trackerDB()
au.log = l.trackerLog()
au.ledger = l
start := time.Now()
ledgerAccountsinitCount.Inc(nil)
- err = au.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error {
- ar, err := tx.MakeAccountsReader()
- if err != nil {
- return err
+ err := au.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error {
+ ar, err0 := tx.MakeAccountsReader()
+ if err0 != nil {
+ return err0
}
totals, err0 := ar.AccountsTotals(ctx, false)
@@ -938,17 +844,17 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou
ledgerAccountsinitMicros.AddMicrosecondsSince(start, nil)
if err != nil {
- return
+ return err
}
au.accountsq, err = au.dbs.MakeAccountsOptimizedReader()
if err != nil {
- return
+ return err
}
hdr, err := l.BlockHdr(lastBalancesRound)
if err != nil {
- return
+ return err
}
au.versions = []protocol.ConsensusVersion{hdr.CurrentProtocol}
@@ -968,7 +874,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker, lastBalancesRou
au.baseResources.init(au.log, 0, 1)
au.baseKVs.init(au.log, 0, 1)
}
- return
+ return nil
}
// newBlockImpl is the accountUpdates implementation of the ledgerTracker interface. This is the "internal" facing function
@@ -1171,7 +1077,7 @@ func (au *accountUpdates) lookupLatest(addr basics.Address) (data basics.Account
// use a cache of the most recent account state.
ad = macct.data
foundAccount = true
- } else if pad, has := au.baseAccounts.read(addr); has && pad.Round == currentDbRound {
+ } else if pad, inLRU := au.baseAccounts.read(addr); inLRU && pad.Round == currentDbRound {
// we don't technically need this, since it's already in the baseAccounts, however, writing this over
// would ensure that we promote this field.
au.baseAccounts.writePending(pad)
@@ -1185,8 +1091,8 @@ func (au *accountUpdates) lookupLatest(addr basics.Address) (data basics.Account
// check for resources modified in the past rounds, in the deltas
for cidx, mr := range au.resources.getForAddress(addr) {
- if err := addResource(cidx, rnd, mr.resource); err != nil {
- return basics.AccountData{}, basics.Round(0), basics.MicroAlgos{}, err
+ if addErr := addResource(cidx, rnd, mr.resource); addErr != nil {
+ return basics.AccountData{}, basics.Round(0), basics.MicroAlgos{}, addErr
}
}
@@ -1201,8 +1107,8 @@ func (au *accountUpdates) lookupLatest(addr basics.Address) (data basics.Account
// would ensure that we promote this field.
au.baseResources.writePending(prd, addr)
if prd.AcctRef != nil {
- if err := addResource(prd.Aidx, rnd, prd.AccountResource()); err != nil {
- return basics.AccountData{}, basics.Round(0), basics.MicroAlgos{}, err
+ if addErr := addResource(prd.Aidx, rnd, prd.AccountResource()); addErr != nil {
+ return basics.AccountData{}, basics.Round(0), basics.MicroAlgos{}, addErr
}
}
}
@@ -1256,8 +1162,8 @@ func (au *accountUpdates) lookupLatest(addr basics.Address) (data basics.Account
if resourceDbRound == currentDbRound {
for _, pd := range persistedResources {
au.baseResources.writePending(pd, addr)
- if err := addResource(pd.Aidx, currentDbRound, pd.AccountResource()); err != nil {
- return basics.AccountData{}, basics.Round(0), basics.MicroAlgos{}, err
+ if addErr := addResource(pd.Aidx, currentDbRound, pd.AccountResource()); addErr != nil {
+ return basics.AccountData{}, basics.Round(0), basics.MicroAlgos{}, addErr
}
}
// We've found all the resources we could find for this address.
@@ -1508,7 +1414,7 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
}
// getCreatorForRound returns the asset/app creator for a given asset/app index at a given round
-func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType, synchronized bool) (creator basics.Address, ok bool, err error) {
+func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType, synchronized bool) (basics.Address, bool, error) {
unlock := false
if synchronized {
au.accountsMu.RLock()
@@ -1524,6 +1430,7 @@ func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.Creat
for {
currentDbRound := au.cachedDBRound
currentDeltaLen := len(au.deltas)
+ var err error
offset, err = au.roundOffset(rnd)
if err != nil {
return basics.Address{}, false, err
@@ -1558,6 +1465,8 @@ func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.Creat
unlock = false
}
// Check the database
+ var ok bool
+ var creator basics.Address
creator, ok, dbRound, err = au.accountsq.LookupCreator(cidx, ctype)
if err != nil {
return basics.Address{}, false, err
@@ -1682,12 +1591,12 @@ func (au *accountUpdates) commitRound(ctx context.Context, tx trackerdb.Transact
dcc.stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano()) - dcc.stats.OldAccountPreloadDuration
}
- arw, err := tx.MakeAccountsReaderWriter()
+ aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
}
- err = arw.AccountsPutTotals(dcc.roundTotals, false)
+ err = aw.AccountsPutTotals(dcc.roundTotals, false)
if err != nil {
return err
}
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index 0faaa4de6..d938b0d53 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -51,7 +51,7 @@ var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36}
type mockLedgerForTracker struct {
- dbs trackerdb.TrackerStore
+ dbs trackerdb.Store
blocks []blockEntry
deltas []ledgercore.StateDelta
log logging.Logger
@@ -110,8 +110,7 @@ func setupAccts(niter int) []map[basics.Address]basics.AccountData {
}
func makeMockLedgerForTrackerWithLogger(t testing.TB, inMemory bool, initialBlocksCount int, consensusVersion protocol.ConsensusVersion, accts []map[basics.Address]basics.AccountData, l logging.Logger) *mockLedgerForTracker {
- dbs, fileName := sqlitedriver.DbOpenTrackerTest(t, inMemory)
- dbs.SetLogger(l)
+ dbs, fileName := sqlitedriver.OpenForTesting(t, inMemory)
blocks := randomInitChain(consensusVersion, initialBlocksCount)
deltas := make([]ledgercore.StateDelta, initialBlocksCount)
@@ -182,7 +181,7 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker {
dbs.Rdb.SetLogger(dblogger)
dbs.Wdb.SetLogger(dblogger)
- newLedgerTracker.dbs = sqlitedriver.CreateTrackerSQLStore(dbs)
+ newLedgerTracker.dbs = sqlitedriver.MakeStore(dbs)
return newLedgerTracker
}
@@ -253,7 +252,7 @@ func (ml *mockLedgerForTracker) BlockHdr(rnd basics.Round) (bookkeeping.BlockHea
return ml.blocks[int(rnd)].block.BlockHeader, nil
}
-func (ml *mockLedgerForTracker) trackerDB() trackerdb.TrackerStore {
+func (ml *mockLedgerForTracker) trackerDB() trackerdb.Store {
return ml.dbs
}
@@ -297,13 +296,13 @@ func (au *accountUpdates) allBalances(rnd basics.Round) (bals map[basics.Address
return
}
- err = au.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
+ err = au.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error {
var err0 error
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
if err != nil {
return err
}
- bals, err0 = arw.Testing().AccountsAllTest()
+ bals, err0 = ar.Testing().AccountsAllTest()
return err0
})
if err != nil {
@@ -914,185 +913,6 @@ func testAcctUpdatesUpdatesCorrectness(t *testing.T, cfg config.Local) {
t.Run("DiskDB", testFunction)
}
-// listAndCompareComb lists the assets/applications and then compares against the expected
-// It repeats with different combinations of the limit parameters
-func listAndCompareComb(t *testing.T, au *accountUpdates, expected map[basics.CreatableIndex]ledgercore.ModifiedCreatable) {
-
- // test configuration parameters
-
- // pick the second largest index for the app and asset
- // This is to make sure exactly one element is left out
- // as a result of max index
- maxAss1 := basics.CreatableIndex(0)
- maxAss2 := basics.CreatableIndex(0)
- maxApp1 := basics.CreatableIndex(0)
- maxApp2 := basics.CreatableIndex(0)
- for a, b := range expected {
- // A moving window of the last two largest indexes: [maxAss1, maxAss2]
- if b.Ctype == basics.AssetCreatable {
- if maxAss2 < a {
- maxAss1 = maxAss2
- maxAss2 = a
- } else if maxAss1 < a {
- maxAss1 = a
- }
- }
- if b.Ctype == basics.AppCreatable {
- if maxApp2 < a {
- maxApp1 = maxApp2
- maxApp2 = a
- } else if maxApp1 < a {
- maxApp1 = a
- }
- }
- }
-
- // No limits. max asset index, max app index and max results have no effect
- // This is to make sure the deleted elements do not show up
- maxAssetIdx := basics.AssetIndex(maxAss2)
- maxAppIdx := basics.AppIndex(maxApp2)
- maxResults := uint64(len(expected))
- listAndCompare(t, maxAssetIdx, maxAppIdx, maxResults, au, expected)
-
- // Limit with max asset index and max app index (max results has no effect)
- maxAssetIdx = basics.AssetIndex(maxAss1)
- maxAppIdx = basics.AppIndex(maxApp1)
- maxResults = uint64(len(expected))
- listAndCompare(t, maxAssetIdx, maxAppIdx, maxResults, au, expected)
-
- // Limit with max results
- maxResults = 1
- listAndCompare(t, maxAssetIdx, maxAppIdx, maxResults, au, expected)
-}
-
-// listAndCompareComb lists the assets/applications and then compares against the expected
-// It uses the provided limit parameters
-func listAndCompare(t *testing.T,
- maxAssetIdx basics.AssetIndex,
- maxAppIdx basics.AppIndex,
- maxResults uint64,
- au *accountUpdates,
- expected map[basics.CreatableIndex]ledgercore.ModifiedCreatable) {
-
- // get the results with the given parameters
- assetRes, err := au.ListAssets(maxAssetIdx, maxResults)
- require.NoError(t, err)
- appRes, err := au.ListApplications(maxAppIdx, maxResults)
- require.NoError(t, err)
-
- // count the expected number of results
- expectedAssetCount := uint64(0)
- expectedAppCount := uint64(0)
- for a, b := range expected {
- if b.Created {
- if b.Ctype == basics.AssetCreatable &&
- a <= basics.CreatableIndex(maxAssetIdx) &&
- expectedAssetCount < maxResults {
- expectedAssetCount++
- }
- if b.Ctype == basics.AppCreatable &&
- a <= basics.CreatableIndex(maxAppIdx) &&
- expectedAppCount < maxResults {
- expectedAppCount++
- }
- }
- }
-
- // check the total counts are as expected
- require.Equal(t, int(expectedAssetCount), len(assetRes))
- require.Equal(t, int(expectedAppCount), len(appRes))
-
- // verify the results are correct
- for _, respCrtor := range assetRes {
- crtor := expected[respCrtor.Index]
- require.NotNil(t, crtor)
- require.Equal(t, basics.AssetCreatable, crtor.Ctype)
- require.Equal(t, true, crtor.Created)
-
- require.Equal(t, basics.AssetCreatable, respCrtor.Type)
- require.Equal(t, crtor.Creator, respCrtor.Creator)
- }
- for _, respCrtor := range appRes {
- crtor := expected[respCrtor.Index]
- require.NotNil(t, crtor)
- require.Equal(t, basics.AppCreatable, crtor.Ctype)
- require.Equal(t, true, crtor.Created)
-
- require.Equal(t, basics.AppCreatable, respCrtor.Type)
- require.Equal(t, crtor.Creator, respCrtor.Creator)
- }
-}
-
-// TestListCreatables tests ListAssets and ListApplications
-// It tests with all elements in cache, all synced to database, and combination of both
-// It also tests the max results, max app index and max asset index
-func TestListCreatables(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- // test configuration parameters
- numElementsPerSegement := 25
-
- // set up the database
- dbs, _ := sqlitedriver.DbOpenTrackerTest(t, true)
- dblogger := logging.TestingLog(t)
- dbs.SetLogger(dblogger)
- defer dbs.Close()
-
- err := dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
-
- accts := make(map[basics.Address]basics.AccountData)
- _ = tx.Testing().AccountsInitTest(t, accts, protocol.ConsensusCurrentVersion)
- require.NoError(t, err)
-
- au := &accountUpdates{}
- au.accountsq, err = tx.Testing().MakeAccountsOptimizedReader()
- require.NoError(t, err)
-
- // ******* All results are obtained from the cache. Empty database *******
- // ******* No deletes *******
- // get random data. Initial batch, no deletes
- ctbsList, randomCtbs := randomCreatables(numElementsPerSegement)
- expectedDbImage := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
- ctbsWithDeletes := randomCreatableSampling(1, ctbsList, randomCtbs,
- expectedDbImage, numElementsPerSegement)
- // set the cache
- au.creatables = ctbsWithDeletes
- listAndCompareComb(t, au, expectedDbImage)
-
- // ******* All results are obtained from the database. Empty cache *******
- // ******* No deletes *******
- // sync with the database
- var updates compactAccountDeltas
- var resUpdates compactResourcesDeltas
- _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, ctbsWithDeletes, proto, basics.Round(1))
- require.NoError(t, err)
- // nothing left in cache
- au.creatables = make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
- listAndCompareComb(t, au, expectedDbImage)
-
- // ******* Results are obtained from the database and from the cache *******
- // ******* No deletes in the database. *******
- // ******* Data in the database deleted in the cache *******
- au.creatables = randomCreatableSampling(2, ctbsList, randomCtbs,
- expectedDbImage, numElementsPerSegement)
- listAndCompareComb(t, au, expectedDbImage)
-
- // ******* Results are obtained from the database and from the cache *******
- // ******* Deletes are in the database and in the cache *******
- // sync with the database. This has deletes synced to the database.
- _, _, _, err = accountsNewRound(tx, updates, resUpdates, nil, au.creatables, proto, basics.Round(1))
- require.NoError(t, err)
- // get new creatables in the cache. There will be deleted in the cache from the previous batch.
- au.creatables = randomCreatableSampling(3, ctbsList, randomCtbs,
- expectedDbImage, numElementsPerSegement)
- listAndCompareComb(t, au, expectedDbImage)
-
- return
- })
- require.NoError(t, err)
-}
-
func TestBoxNamesByAppIDs(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -2190,7 +2010,7 @@ func TestAcctUpdatesResources(t *testing.T) {
err := au.prepareCommit(dcc)
require.NoError(t, err)
err = ml.trackers.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
- arw, err := tx.MakeAccountsReaderWriter()
+ aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
}
@@ -2199,7 +2019,7 @@ func TestAcctUpdatesResources(t *testing.T) {
if err != nil {
return err
}
- err = arw.UpdateAccountsRound(newBase)
+ err = aw.UpdateAccountsRound(newBase)
return err
})
require.NoError(t, err)
@@ -2477,7 +2297,7 @@ func auCommitSync(t *testing.T, rnd basics.Round, au *accountUpdates, ml *mockLe
err := au.prepareCommit(dcc)
require.NoError(t, err)
err = ml.trackers.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
- arw, err := tx.MakeAccountsReaderWriter()
+ aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
}
@@ -2486,7 +2306,7 @@ func auCommitSync(t *testing.T, rnd basics.Round, au *accountUpdates, ml *mockLe
if err != nil {
return err
}
- err = arw.UpdateAccountsRound(newBase)
+ err = aw.UpdateAccountsRound(newBase)
return err
})
require.NoError(t, err)
diff --git a/ledger/apply/application.go b/ledger/apply/application.go
index 3522b6080..c87368dfe 100644
--- a/ledger/apply/application.go
+++ b/ledger/apply/application.go
@@ -110,7 +110,7 @@ func createApplication(ac *transactions.ApplicationCallTxnFields, balances Balan
// Update the cached TotalExtraAppPages for this account, used
// when computing MinBalance
totalExtraPages := record.TotalExtraAppPages
- totalExtraPages = basics.AddSaturate32(totalExtraPages, ac.ExtraProgramPages)
+ totalExtraPages = basics.AddSaturate(totalExtraPages, ac.ExtraProgramPages)
record.TotalExtraAppPages = totalExtraPages
// Write back to the creator's balance record
@@ -161,7 +161,7 @@ func deleteApplication(balances Balances, creator basics.Address, appIdx basics.
proto := balances.ConsensusParams()
if proto.EnableExtraPagesOnAppUpdate {
extraPages := params.ExtraProgramPages
- totalExtraPages = basics.SubSaturate32(totalExtraPages, extraPages)
+ totalExtraPages = basics.SubSaturate(totalExtraPages, extraPages)
}
record.TotalExtraAppPages = totalExtraPages
}
@@ -385,7 +385,7 @@ func ApplicationCall(ac transactions.ApplicationCallTxnFields, header transactio
// If this txn is going to set new programs (either for creation or
// update), check that the programs are valid and not too expensive
if ac.ApplicationID == 0 || ac.OnCompletion == transactions.UpdateApplicationOC {
- err := transactions.CheckContractVersions(ac.ApprovalProgram, ac.ClearStateProgram, params, evalParams.Proto)
+ err = transactions.CheckContractVersions(ac.ApprovalProgram, ac.ClearStateProgram, params, evalParams.Proto)
if err != nil {
return err
}
@@ -401,9 +401,9 @@ func ApplicationCall(ac transactions.ApplicationCallTxnFields, header transactio
// execute the ClearStateProgram, whose failures are ignored.
if ac.OnCompletion == transactions.ClearStateOC {
// Ensure that the user is already opted in
- ok, err := balances.HasAppLocalState(header.Sender, appIdx)
- if err != nil {
- return err
+ ok, hasErr := balances.HasAppLocalState(header.Sender, appIdx)
+ if hasErr != nil {
+ return hasErr
}
if !ok {
return fmt.Errorf("cannot clear state: %v is not currently opted in to app %d", header.Sender, appIdx)
@@ -411,16 +411,16 @@ func ApplicationCall(ac transactions.ApplicationCallTxnFields, header transactio
// If the app still exists, run the ClearStateProgram
if exists {
- pass, evalDelta, err := balances.StatefulEval(gi, evalParams, appIdx, params.ClearStateProgram)
- if err != nil {
+ pass, evalDelta, evalErr := balances.StatefulEval(gi, evalParams, appIdx, params.ClearStateProgram)
+ if evalErr != nil {
// ClearStateProgram evaluation can't make the txn fail.
- if _, ok := err.(logic.EvalError); !ok {
- return err
+ if _, ok := evalErr.(logic.EvalError); !ok {
+ return evalErr
}
}
// We will have applied any changes if and only if we passed
- if err == nil && pass {
+ if evalErr == nil && pass {
// Fill in applyData, so that consumers don't have to implement a
// stateful TEAL interpreter to apply state changes
ad.EvalDelta = evalDelta
diff --git a/ledger/apply/application_test.go b/ledger/apply/application_test.go
index 37c132c13..0e6a1ff9a 100644
--- a/ledger/apply/application_test.go
+++ b/ledger/apply/application_test.go
@@ -22,6 +22,7 @@ import (
"testing"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/maps"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -33,28 +34,6 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
)
-// Allocate the map of basics.AppParams if it is nil, and return a copy. We do *not*
-// call clone on each basics.AppParams -- callers must do that for any values where
-// they intend to modify a contained reference type.
-func cloneAppParams(m map[basics.AppIndex]basics.AppParams) map[basics.AppIndex]basics.AppParams {
- res := make(map[basics.AppIndex]basics.AppParams, len(m))
- for k, v := range m {
- res[k] = v
- }
- return res
-}
-
-// Allocate the map of LocalStates if it is nil, and return a copy. We do *not*
-// call clone on each AppLocalState -- callers must do that for any values
-// where they intend to modify a contained reference type.
-func cloneAppLocalStates(m map[basics.AppIndex]basics.AppLocalState) map[basics.AppIndex]basics.AppLocalState {
- res := make(map[basics.AppIndex]basics.AppLocalState, len(m))
- for k, v := range m {
- res[k] = v
- }
- return res
-}
-
func TestApplicationCallFieldsEmpty(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -349,20 +328,6 @@ func (b *testBalances) SetParams(params config.ConsensusParams) {
b.proto = params
}
-func TestAppCallCloneEmpty(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
-
- var ls map[basics.AppIndex]basics.AppLocalState
- cls := cloneAppLocalStates(ls)
- a.Zero(len(cls))
-
- var ap map[basics.AppIndex]basics.AppParams
- cap := cloneAppParams(ap)
- a.Zero(len(cap))
-}
-
func TestAppCallGetParam(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -560,8 +525,8 @@ func TestAppCallApplyCreate(t *testing.T) {
// now we give the creator the app params again
cp := basics.AccountData{}
- cp.AppParams = cloneAppParams(saved.AppParams)
- cp.AppLocalStates = cloneAppLocalStates(saved.AppLocalStates)
+ cp.AppParams = maps.Clone(saved.AppParams)
+ cp.AppLocalStates = maps.Clone(saved.AppLocalStates)
b.balances[creator] = cp
err = ApplicationCall(ac, h, b, ad, 0, &ep, txnCounter)
a.Error(err)
@@ -571,9 +536,6 @@ func TestAppCallApplyCreate(t *testing.T) {
a.Zero(b.putAppParams)
// ensure original balance record in the mock was not changed
// this ensure proper cloning and any in-intended in-memory modifications
- //
- // known artefact of cloning AppLocalState even with empty update, nil map vs empty map
- saved.AppLocalStates = map[basics.AppIndex]basics.AppLocalState{}
a.Equal(saved, b.balances[creator])
saved = b.putBalances[creator]
diff --git a/ledger/apply/asset.go b/ledger/apply/asset.go
index a29dd7ff5..f0701d28e 100644
--- a/ledger/apply/asset.go
+++ b/ledger/apply/asset.go
@@ -110,9 +110,9 @@ func AssetConfig(cc transactions.AssetConfigTxnFields, header transactions.Heade
}
// Re-configuration and destroying must be done by the manager key.
- params, creator, err := getParams(balances, cc.ConfigAsset)
- if err != nil {
- return err
+ params, creator, paramsErr := getParams(balances, cc.ConfigAsset)
+ if paramsErr != nil {
+ return paramsErr
}
if params.Manager.IsZero() || (header.Sender != params.Manager) {
@@ -186,9 +186,9 @@ func AssetConfig(cc transactions.AssetConfigTxnFields, header transactions.Heade
params.Clawback = cc.AssetParams.Clawback
}
- err = balances.PutAssetParams(creator, cc.ConfigAsset, params)
- if err != nil {
- return err
+ paramsErr = balances.PutAssetParams(creator, cc.ConfigAsset, params)
+ if paramsErr != nil {
+ return paramsErr
}
}
diff --git a/ledger/apply/asset_test.go b/ledger/apply/asset_test.go
index 2b7908ac6..802c692c8 100644
--- a/ledger/apply/asset_test.go
+++ b/ledger/apply/asset_test.go
@@ -17,7 +17,6 @@
package apply
import (
- "math/rand"
"testing"
"github.com/stretchr/testify/require"
@@ -29,22 +28,6 @@ import (
"github.com/algorand/go-algorand/test/partitiontest"
)
-func cloneAssetHoldings(m map[basics.AssetIndex]basics.AssetHolding) map[basics.AssetIndex]basics.AssetHolding {
- res := make(map[basics.AssetIndex]basics.AssetHolding, len(m))
- for id, val := range m {
- res[id] = val
- }
- return res
-}
-
-func cloneAssetParams(m map[basics.AssetIndex]basics.AssetParams) map[basics.AssetIndex]basics.AssetParams {
- res := make(map[basics.AssetIndex]basics.AssetParams, len(m))
- for id, val := range m {
- res[id] = val
- }
- return res
-}
-
func TestAssetTransfer(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -117,19 +100,3 @@ func TestAssetTransfer(t *testing.T) {
require.Equal(t, dstAmount-toSend, addrs[cls].Assets[1].Amount)
}
}
-
-var benchTotal int = 0
-
-func BenchmarkAssetCloning(b *testing.B) {
- const numAssets = 800
- assets := make(map[basics.AssetIndex]basics.AssetHolding, numAssets)
- for j := 0; j < numAssets; j++ {
- aidx := basics.AssetIndex(rand.Int63n(100000000))
- assets[aidx] = basics.AssetHolding{Amount: uint64(aidx)}
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- clone := cloneAssetHoldings(assets)
- benchTotal += len(clone) // make sure the compiler does not optimize out cloneAssetHoldings call
- }
-}
diff --git a/ledger/apply/mockBalances_test.go b/ledger/apply/mockBalances_test.go
index a9b838715..3d441b03c 100644
--- a/ledger/apply/mockBalances_test.go
+++ b/ledger/apply/mockBalances_test.go
@@ -23,6 +23,7 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
+ "golang.org/x/exp/maps"
)
type mockBalances struct {
@@ -161,18 +162,22 @@ func (b *mockCreatableBalances) GetAssetParams(addr basics.Address, aidx basics.
return
}
+// mapWith returns a new map with the given key and value added to it.
+// maps.Clone would keep nil inputs as nil, so we make() then map.Copy().
+func mapWith[M ~map[K]V, K comparable, V any](m M, k K, v V) M {
+ newMap := make(M, len(m)+1)
+ maps.Copy(newMap, m)
+ newMap[k] = v
+ return newMap
+}
+
func (b *mockCreatableBalances) PutAppParams(addr basics.Address, aidx basics.AppIndex, params basics.AppParams) error {
b.putAppParams++
acct, err := b.access.getAccount(addr, false)
if err != nil {
return err
}
- m := make(map[basics.AppIndex]basics.AppParams, len(acct.AppParams))
- for k, v := range acct.AppParams {
- m[k] = v
- }
- m[aidx] = params
- acct.AppParams = m
+ acct.AppParams = mapWith(acct.AppParams, aidx, params)
return b.access.putAccount(addr, acct)
}
func (b *mockCreatableBalances) PutAppLocalState(addr basics.Address, aidx basics.AppIndex, state basics.AppLocalState) error {
@@ -181,12 +186,7 @@ func (b *mockCreatableBalances) PutAppLocalState(addr basics.Address, aidx basic
if err != nil {
return err
}
- m := make(map[basics.AppIndex]basics.AppLocalState, len(acct.AppLocalStates))
- for k, v := range acct.AppLocalStates {
- m[k] = v
- }
- m[aidx] = state
- acct.AppLocalStates = m
+ acct.AppLocalStates = mapWith(acct.AppLocalStates, aidx, state)
return b.access.putAccount(addr, acct)
}
func (b *mockCreatableBalances) PutAssetHolding(addr basics.Address, aidx basics.AssetIndex, data basics.AssetHolding) error {
@@ -195,12 +195,7 @@ func (b *mockCreatableBalances) PutAssetHolding(addr basics.Address, aidx basics
if err != nil {
return err
}
- m := make(map[basics.AssetIndex]basics.AssetHolding, len(acct.Assets))
- for k, v := range acct.Assets {
- m[k] = v
- }
- m[aidx] = data
- acct.Assets = m
+ acct.Assets = mapWith(acct.Assets, aidx, data)
return b.access.putAccount(addr, acct)
}
func (b *mockCreatableBalances) PutAssetParams(addr basics.Address, aidx basics.AssetIndex, data basics.AssetParams) error {
@@ -209,12 +204,7 @@ func (b *mockCreatableBalances) PutAssetParams(addr basics.Address, aidx basics.
if err != nil {
return err
}
- m := make(map[basics.AssetIndex]basics.AssetParams, len(acct.AssetParams))
- for k, v := range acct.AssetParams {
- m[k] = v
- }
- m[aidx] = data
- acct.AssetParams = m
+ acct.AssetParams = mapWith(acct.AssetParams, aidx, data)
return b.access.putAccount(addr, acct)
}
@@ -224,10 +214,7 @@ func (b *mockCreatableBalances) DeleteAppParams(addr basics.Address, aidx basics
if err != nil {
return err
}
- m := make(map[basics.AppIndex]basics.AppParams, len(acct.AppParams))
- for k, v := range acct.AppParams {
- m[k] = v
- }
+ m := maps.Clone(acct.AppParams)
delete(m, aidx)
acct.AppParams = m
return b.access.putAccount(addr, acct)
@@ -238,10 +225,7 @@ func (b *mockCreatableBalances) DeleteAppLocalState(addr basics.Address, aidx ba
if err != nil {
return err
}
- m := make(map[basics.AppIndex]basics.AppLocalState, len(acct.AppLocalStates))
- for k, v := range acct.AppLocalStates {
- m[k] = v
- }
+ m := maps.Clone(acct.AppLocalStates)
delete(m, aidx)
acct.AppLocalStates = m
return b.access.putAccount(addr, acct)
@@ -252,10 +236,7 @@ func (b *mockCreatableBalances) DeleteAssetHolding(addr basics.Address, aidx bas
if err != nil {
return err
}
- m := make(map[basics.AssetIndex]basics.AssetHolding, len(acct.Assets))
- for k, v := range acct.Assets {
- m[k] = v
- }
+ m := maps.Clone(acct.Assets)
delete(m, aidx)
acct.Assets = m
return b.access.putAccount(addr, acct)
@@ -266,10 +247,7 @@ func (b *mockCreatableBalances) DeleteAssetParams(addr basics.Address, aidx basi
if err != nil {
return err
}
- m := make(map[basics.AssetIndex]basics.AssetParams, len(acct.AssetParams))
- for k, v := range acct.AssetParams {
- m[k] = v
- }
+ m := maps.Clone(acct.AssetParams)
delete(m, aidx)
acct.AssetParams = m
return b.access.putAccount(addr, acct)
diff --git a/ledger/apptxn_test.go b/ledger/apptxn_test.go
index a2d7dba11..982c9f01b 100644
--- a/ledger/apptxn_test.go
+++ b/ledger/apptxn_test.go
@@ -3101,7 +3101,7 @@ itxn_submit
}
if ver <= 33 {
- dl.txgroup("invalid Account reference", &fund0, &fund1, &callTx)
+ dl.txgroup("unavailable Account", &fund0, &fund1, &callTx)
return
}
payset = dl.txgroup("", &fund0, &fund1, &callTx)
diff --git a/ledger/archival_test.go b/ledger/archival_test.go
index de483e227..9b361b703 100644
--- a/ledger/archival_test.go
+++ b/ledger/archival_test.go
@@ -76,7 +76,7 @@ func (wl *wrappedLedger) Latest() basics.Round {
return wl.l.Latest()
}
-func (wl *wrappedLedger) trackerDB() trackerdb.TrackerStore {
+func (wl *wrappedLedger) trackerDB() trackerdb.Store {
return wl.l.trackerDB()
}
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go
index 355559ef8..8baccf075 100644
--- a/ledger/catchpointtracker.go
+++ b/ledger/catchpointtracker.go
@@ -110,7 +110,7 @@ type catchpointTracker struct {
log logging.Logger
// Connection to the database.
- dbs trackerdb.TrackerStore
+ dbs trackerdb.Store
catchpointStore trackerdb.CatchpointReaderWriter
// The last catchpoint label that was written to the database. Should always align with what's in the database.
@@ -201,6 +201,23 @@ func (ct *catchpointTracker) GetLastCatchpointLabel() string {
return ct.lastCatchpointLabel
}
+func (ct *catchpointTracker) getSPVerificationData() (encodedData []byte, spVerificationHash crypto.Digest, err error) {
+ err = ct.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error {
+ rawData, dbErr := tx.MakeSpVerificationCtxReader().GetAllSPContexts(ctx)
+ if dbErr != nil {
+ return dbErr
+ }
+
+ wrappedData := catchpointStateProofVerificationContext{Data: rawData}
+ spVerificationHash, encodedData = crypto.EncodeAndHash(wrappedData)
+ return nil
+ })
+ if err != nil {
+ return nil, crypto.Digest{}, err
+ }
+ return encodedData, spVerificationHash, nil
+}
+
func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basics.Round, updatingBalancesDuration time.Duration) error {
ct.log.Infof("finishing catchpoint's first stage dbRound: %d", dbRound)
@@ -209,8 +226,16 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
var totalChunks uint64
var biggestChunkLen uint64
var spVerificationHash crypto.Digest
+ var spVerificationEncodedData []byte
var catchpointGenerationStats telemetryspec.CatchpointGenerationEventDetails
+ // Generate the SP Verification hash and encoded data. The hash is used in the label when tracking catchpoints,
+ // and the encoded data for that hash will be added to the catchpoint file if catchpoint generation is enabled.
+ spVerificationEncodedData, spVerificationHash, err := ct.getSPVerificationData()
+ if err != nil {
+ return err
+ }
+
if ct.enableGeneratingCatchpointFiles {
// Generate the catchpoint file. This is done inline so that it will
// block any new accounts from being written. generateCatchpointData()
@@ -219,8 +244,8 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
var err error
catchpointGenerationStats.BalancesWriteTime = uint64(updatingBalancesDuration.Nanoseconds())
- totalKVs, totalAccounts, totalChunks, biggestChunkLen, spVerificationHash, err = ct.generateCatchpointData(
- ctx, dbRound, &catchpointGenerationStats)
+ totalKVs, totalAccounts, totalChunks, biggestChunkLen, err = ct.generateCatchpointData(
+ ctx, dbRound, &catchpointGenerationStats, spVerificationEncodedData)
atomic.StoreInt32(&ct.catchpointDataWriting, 0)
if err != nil {
return err
@@ -228,7 +253,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
}
return ct.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
- crw, err := tx.MakeCatchpointReaderWriter()
+ cw, err := tx.MakeCatchpointWriter()
if err != nil {
return err
}
@@ -239,7 +264,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
}
// Clear the db record.
- return crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateWritingFirstStageInfo, 0)
+ return cw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateWritingFirstStageInfo, 0)
})
}
@@ -512,11 +537,11 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx trackerdb.Trans
}
}()
- crw, err := tx.MakeCatchpointReaderWriter()
+ cw, err := tx.MakeCatchpointWriter()
if err != nil {
return err
}
- arw, err := tx.MakeAccountsReaderWriter()
+ aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
}
@@ -556,25 +581,25 @@ func (ct *catchpointTracker) commitRound(ctx context.Context, tx trackerdb.Trans
dcc.stats.MerkleTrieUpdateDuration = now - dcc.stats.MerkleTrieUpdateDuration
}
- err = arw.UpdateAccountsHashRound(ctx, treeTargetRound)
+ err = aw.UpdateAccountsHashRound(ctx, treeTargetRound)
if err != nil {
return err
}
if dcc.catchpointFirstStage {
- err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateWritingFirstStageInfo, 1)
+ err = cw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateWritingFirstStageInfo, 1)
if err != nil {
return err
}
}
- err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchpointLookback, dcc.catchpointLookback)
+ err = cw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchpointLookback, dcc.catchpointLookback)
if err != nil {
return err
}
for _, round := range ct.calculateCatchpointRounds(&dcc.deferredCommitRange) {
- err = crw.InsertUnfinishedCatchpoint(ctx, round, dcc.committedRoundDigests[round-dcc.oldBase-1])
+ err = cw.InsertUnfinishedCatchpoint(ctx, round, dcc.committedRoundDigests[round-dcc.oldBase-1])
if err != nil {
return err
}
@@ -950,18 +975,17 @@ func (ct *catchpointTracker) close() {
}
// accountsUpdateBalances applies the given compactAccountDeltas to the merkle trie
-func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccountDeltas, resourcesDeltas compactResourcesDeltas, kvDeltas map[string]modifiedKvValue, oldBase basics.Round, newBase basics.Round) (err error) {
+func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccountDeltas, resourcesDeltas compactResourcesDeltas, kvDeltas map[string]modifiedKvValue, oldBase basics.Round, newBase basics.Round) error {
if !ct.catchpointEnabled() {
return nil
}
- var added, deleted bool
accumulatedChanges := 0
for i := 0; i < accountsDeltas.len(); i++ {
delta := accountsDeltas.getByIdx(i)
if !delta.oldAcct.AccountData.IsEmpty() {
deleteHash := trackerdb.AccountHashBuilderV6(delta.address, &delta.oldAcct.AccountData, protocol.Encode(&delta.oldAcct.AccountData))
- deleted, err = ct.balancesTrie.Delete(deleteHash)
+ deleted, err := ct.balancesTrie.Delete(deleteHash)
if err != nil {
return fmt.Errorf("failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), delta.address, err)
}
@@ -974,7 +998,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
if !delta.newAcct.IsEmpty() {
addHash := trackerdb.AccountHashBuilderV6(delta.address, &delta.newAcct, protocol.Encode(&delta.newAcct))
- added, err = ct.balancesTrie.Add(addHash)
+ added, err := ct.balancesTrie.Add(addHash)
if err != nil {
return fmt.Errorf("attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), delta.address, err)
}
@@ -994,7 +1018,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
if err != nil {
return err
}
- deleted, err = ct.balancesTrie.Delete(deleteHash)
+ deleted, err := ct.balancesTrie.Delete(deleteHash)
if err != nil {
return fmt.Errorf("failed to delete resource hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, err)
}
@@ -1010,7 +1034,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
if err != nil {
return err
}
- added, err = ct.balancesTrie.Add(addHash)
+ added, err := ct.balancesTrie.Add(addHash)
if err != nil {
return fmt.Errorf("attempted to add duplicate resource hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, err)
}
@@ -1032,7 +1056,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
continue // changed back within the delta span
}
deleteHash := trackerdb.KvHashBuilderV6(key, mv.oldData)
- deleted, err = ct.balancesTrie.Delete(deleteHash)
+ deleted, err := ct.balancesTrie.Delete(deleteHash)
if err != nil {
return fmt.Errorf("failed to delete kv hash '%s' from merkle trie for key %v: %w", hex.EncodeToString(deleteHash), key, err)
}
@@ -1045,7 +1069,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
if mv.data != nil {
addHash := trackerdb.KvHashBuilderV6(key, mv.data)
- added, err = ct.balancesTrie.Add(addHash)
+ added, err := ct.balancesTrie.Add(addHash)
if err != nil {
return fmt.Errorf("attempted to add duplicate kv hash '%s' from merkle trie for key %v: %w", hex.EncodeToString(addHash), key, err)
}
@@ -1059,15 +1083,17 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
// write it all to disk.
var cstats merkletrie.CommitStats
+ var commitErr error
if accumulatedChanges > 0 {
- cstats, err = ct.balancesTrie.Commit()
+ cstats, commitErr = ct.balancesTrie.Commit()
}
if ct.log.GetTelemetryEnabled() {
root, rootErr := ct.balancesTrie.RootHash()
if rootErr != nil {
+ // log rootErr if failed to fetch for reporting in telemetry, then return whether Commit() succeeded or not
ct.log.Errorf("accountsUpdateBalances: error retrieving balances trie root: %v", rootErr)
- return
+ return commitErr
}
ct.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointRootUpdateEvent, telemetryspec.CatchpointRootUpdateEventDetails{
Root: root.String(),
@@ -1084,7 +1110,7 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
})
}
- return
+ return commitErr
}
// isWritingCatchpointDataFile returns true iff a (first stage) catchpoint data file
@@ -1095,13 +1121,13 @@ func (ct *catchpointTracker) isWritingCatchpointDataFile() bool {
// Generates a (first stage) catchpoint data file.
// The file is built in the following order:
-// - Catchpoint file header (named content.msgpack). The header is generated and appended to the file at the end of the
-// second stage of catchpoint generation.
-// - State proof verification data chunk (named stateProofVerificationContext.msgpack).
-// - Balance and KV chunk (named balances.x.msgpack).
-// ...
-// - Balance and KV chunk (named balances.x.msgpack).
-func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, accountsRound basics.Round, catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails) (totalKVs, totalAccounts, totalChunks, biggestChunkLen uint64, spVerificationHash crypto.Digest, err error) {
+// - Catchpoint file header (named content.msgpack). The header is generated and appended to the file at the end of the
+// second stage of catchpoint generation.
+// - State proof verification data chunk (named stateProofVerificationContext.msgpack).
+// - Balance and KV chunk (named balances.x.msgpack).
+// ...
+// - Balance and KV chunk (named balances.x.msgpack).
+func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, accountsRound basics.Round, catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails, encodedSPData []byte) (totalKVs, totalAccounts, totalChunks, biggestChunkLen uint64, err error) {
ct.log.Debugf("catchpointTracker.generateCatchpointData() writing catchpoint accounts for round %d", accountsRound)
startTime := time.Now()
@@ -1125,13 +1151,13 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
start := time.Now()
ledgerGeneratecatchpointCount.Inc(nil)
- err = ct.dbs.TransactionContext(ctx, func(dbCtx context.Context, tx trackerdb.TransactionScope) (err error) {
+ err = ct.dbs.SnapshotContext(ctx, func(dbCtx context.Context, tx trackerdb.SnapshotScope) (err error) {
catchpointWriter, err = makeCatchpointWriter(dbCtx, catchpointDataFilePath, tx, ResourcesPerCatchpointFileChunk)
if err != nil {
return
}
- spVerificationHash, err = catchpointWriter.WriteStateProofVerificationContext()
+ err = catchpointWriter.WriteStateProofVerificationContext(encodedSPData)
if err != nil {
return
}
@@ -1186,7 +1212,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
ledgerGeneratecatchpointMicros.AddMicrosecondsSince(start, nil)
if err != nil {
ct.log.Warnf("catchpointTracker.generateCatchpointData() %v", err)
- return 0, 0, 0, 0, crypto.Digest{}, err
+ return 0, 0, 0, 0, err
}
catchpointGenerationStats.FileSize = uint64(catchpointWriter.writtenBytes)
@@ -1195,41 +1221,40 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
catchpointGenerationStats.KVsCount = catchpointWriter.totalKVs
catchpointGenerationStats.AccountsRound = uint64(accountsRound)
- return catchpointWriter.totalKVs, catchpointWriter.totalAccounts, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, spVerificationHash, nil
+ return catchpointWriter.totalKVs, catchpointWriter.totalAccounts, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, nil
}
func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx trackerdb.TransactionScope, catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails, accountsRound basics.Round, totalKVs uint64, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64, stateProofVerificationHash crypto.Digest) error {
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
if err != nil {
return err
}
- accountTotals, err := arw.AccountsTotals(ctx, false)
+ accountTotals, err := ar.AccountsTotals(ctx, false)
if err != nil {
return err
}
- {
- mc, err := tx.MakeMerkleCommitter(false)
- if err != nil {
- return err
- }
- if ct.balancesTrie == nil {
- trie, err := merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig)
- if err != nil {
- return err
- }
- ct.balancesTrie = trie
- } else {
- ct.balancesTrie.SetCommitter(mc)
+ mc, err := tx.MakeMerkleCommitter(false)
+ if err != nil {
+ return err
+ }
+ if ct.balancesTrie == nil {
+ trie, trieErr := merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig)
+ if trieErr != nil {
+ return trieErr
}
+ ct.balancesTrie = trie
+ } else {
+ ct.balancesTrie.SetCommitter(mc)
}
+
trieBalancesHash, err := ct.balancesTrie.RootHash()
if err != nil {
return err
}
- crw, err := tx.MakeCatchpointReaderWriter()
+ cw, err := tx.MakeCatchpointWriter()
if err != nil {
return err
}
@@ -1244,7 +1269,7 @@ func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx tracke
StateProofVerificationHash: stateProofVerificationHash,
}
- err = crw.InsertOrReplaceCatchpointFirstStageInfo(ctx, accountsRound, &info)
+ err = cw.InsertOrReplaceCatchpointFirstStageInfo(ctx, accountsRound, &info)
if err != nil {
return err
}
@@ -1332,28 +1357,28 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS
}
if dbFileName != "" {
catchpointPath := filepath.Join(ct.dbDirectory, dbFileName)
- file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
- if err == nil && file != nil {
+ file, openErr := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
+ if openErr == nil && file != nil {
return &readCloseSizer{ReadCloser: file, size: fileSize}, nil
}
// else, see if this is a file-not-found error
- if os.IsNotExist(err) {
+ if os.IsNotExist(openErr) {
// the database told us that we have this file.. but we couldn't find it.
// delete it from the database.
- crw, err := ct.dbs.MakeCatchpointReaderWriter()
- if err != nil {
- return nil, err
+ crw, err2 := ct.dbs.MakeCatchpointReaderWriter()
+ if err2 != nil {
+ return nil, err2
}
- err = ct.recordCatchpointFile(context.Background(), crw, round, "", 0)
- if err != nil {
- ct.log.Warnf("catchpointTracker.GetCatchpointStream() unable to delete missing catchpoint entry: %v", err)
- return nil, err
+ err2 = ct.recordCatchpointFile(context.Background(), crw, round, "", 0)
+ if err2 != nil {
+ ct.log.Warnf("catchpointTracker.GetCatchpointStream() unable to delete missing catchpoint entry: %v", err2)
+ return nil, err2
}
return nil, ledgercore.ErrNoEntry{}
}
// it's some other error.
- return nil, fmt.Errorf("catchpointTracker.GetCatchpointStream() unable to open catchpoint file '%s' %v", catchpointPath, err)
+ return nil, fmt.Errorf("catchpointTracker.GetCatchpointStream() unable to open catchpoint file '%s' %v", catchpointPath, openErr)
}
// if the database doesn't know about that round, see if we have that file anyway:
@@ -1388,11 +1413,17 @@ func (ct *catchpointTracker) catchpointEnabled() bool {
// initializeHashes initializes account/resource/kv hashes.
// as part of the initialization, it tests if a hash table matches to account base and updates the former.
func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx trackerdb.TransactionScope, rnd basics.Round) error {
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
if err != nil {
return err
}
- hashRound, err := arw.AccountsHashRound(ctx)
+
+ aw, err := tx.MakeAccountsWriter()
+ if err != nil {
+ return err
+ }
+
+ hashRound, err := ar.AccountsHashRound(ctx)
if err != nil {
return err
}
@@ -1400,7 +1431,7 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx trackerdb.
if hashRound != rnd {
// if the hashed round is different then the base round, something was modified, and the accounts aren't in sync
// with the hashes.
- err = arw.ResetAccountHashes(ctx)
+ err = aw.ResetAccountHashes(ctx)
if err != nil {
return err
}
@@ -1438,28 +1469,28 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx trackerdb.
pendingTrieHashes := 0
totalOrderedAccounts := 0
for {
- accts, processedRows, err := accountBuilderIt.Next(ctx)
- if err == sql.ErrNoRows {
+ accts, processedRows, itErr := accountBuilderIt.Next(ctx)
+ if itErr == sql.ErrNoRows {
// the account builder would return sql.ErrNoRows when no more data is available.
break
- } else if err != nil {
- return err
+ } else if itErr != nil {
+ return itErr
}
if len(accts) > 0 {
trieHashCount += len(accts)
pendingTrieHashes += len(accts)
for _, acct := range accts {
- added, err := trie.Add(acct.Digest)
- if err != nil {
- return fmt.Errorf("initializeHashes was unable to add acct to trie: %v", err)
+ added, addErr := trie.Add(acct.Digest)
+ if addErr != nil {
+ return fmt.Errorf("initializeHashes was unable to add acct to trie: %v", addErr)
}
if !added {
// we need to translate the "addrid" into actual account address so that
// we can report the failure.
- addr, err := arw.LookupAccountAddressFromAddressID(ctx, acct.AccountRef)
- if err != nil {
- ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account id %d : %v", hex.EncodeToString(acct.Digest), acct.AccountRef, err)
+ addr, lErr := ar.LookupAccountAddressFromAddressID(ctx, acct.AccountRef)
+ if lErr != nil {
+ ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account id %d : %v", hex.EncodeToString(acct.Digest), acct.AccountRef, lErr)
} else {
ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account %v", hex.EncodeToString(acct.Digest), addr)
}
@@ -1507,16 +1538,16 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx trackerdb.
}
defer kvs.Close()
for kvs.Next() {
- k, v, err := kvs.KeyValue()
- if err != nil {
- return err
+ k, v, err2 := kvs.KeyValue()
+ if err2 != nil {
+ return err2
}
hash := trackerdb.KvHashBuilderV6(string(k), v)
trieHashCount++
pendingTrieHashes++
- added, err := trie.Add(hash)
- if err != nil {
- return fmt.Errorf("initializeHashes was unable to add kv (key=%s) to trie: %v", hex.EncodeToString(k), err)
+ added, err2 := trie.Add(hash)
+ if err2 != nil {
+ return fmt.Errorf("initializeHashes was unable to add kv (key=%s) to trie: %v", hex.EncodeToString(k), err2)
}
if !added {
ct.log.Warnf("initializeHashes attempted to add duplicate kv hash '%s' to merkle trie for key %s", hex.EncodeToString(hash), k)
@@ -1524,9 +1555,9 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx trackerdb.
if pendingTrieHashes >= trieRebuildCommitFrequency {
// this trie Evict will commit using the current transaction.
// if anything goes wrong, it will still get rolled back.
- _, err = trie.Evict(true)
- if err != nil {
- return fmt.Errorf("initializeHashes was unable to commit changes to trie: %v", err)
+ _, err2 = trie.Evict(true)
+ if err2 != nil {
+ return fmt.Errorf("initializeHashes was unable to commit changes to trie: %v", err2)
}
pendingTrieHashes = 0
}
@@ -1541,7 +1572,7 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx trackerdb.
}
// we've just updated the merkle trie, update the hashRound to reflect that.
- err = arw.UpdateAccountsHashRound(ctx, rnd)
+ err = aw.UpdateAccountsHashRound(ctx, rnd)
if err != nil {
return fmt.Errorf("initializeHashes was unable to update the account hash round to %d: %v", rnd, err)
}
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index 84a103300..6669eab4a 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -328,9 +328,12 @@ func TestRecordCatchpointFile(t *testing.T) {
}
func createCatchpoint(t *testing.T, ct *catchpointTracker, accountsRound basics.Round, ml *mockLedgerForTracker, round basics.Round) {
+ spVerificationEncodedData, stateProofVerificationHash, err := ct.getSPVerificationData()
+ require.NoError(t, err)
+
var catchpointGenerationStats telemetryspec.CatchpointGenerationEventDetails
- _, _, _, biggestChunkLen, stateProofVerificationHash, err := ct.generateCatchpointData(
- context.Background(), accountsRound, &catchpointGenerationStats)
+ _, _, _, biggestChunkLen, err := ct.generateCatchpointData(
+ context.Background(), accountsRound, &catchpointGenerationStats, spVerificationEncodedData)
require.NoError(t, err)
require.Equal(t, calculateStateProofVerificationHash(t, ml), stateProofVerificationHash)
@@ -401,9 +404,7 @@ func writeDummySpVerification(t *testing.T, nextIndexForContext uint64, numberOf
e.LastAttestedRound = basics.Round(nextIndexForContext + i)
contexts[i] = &e
}
- writer := tx.MakeSpVerificationCtxReaderWriter()
-
- return writer.StoreSPContexts(ctx, contexts[:])
+ return tx.MakeSpVerificationCtxWriter().StoreSPContexts(ctx, contexts[:])
})
require.NoError(t, err)
}
@@ -436,7 +437,7 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) {
// at this point, the database was created. We want to fill the accounts data
accountsNumber := 6000000 * b.N
err = ml.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
- arw, err := tx.MakeAccountsReaderWriter()
+ aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
}
@@ -457,13 +458,15 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) {
}
}
- return arw.UpdateAccountsHashRound(ctx, 1)
+ return aw.UpdateAccountsHashRound(ctx, 1)
})
require.NoError(b, err)
var catchpointGenerationStats telemetryspec.CatchpointGenerationEventDetails
+ encodedSPData, _, err := ct.getSPVerificationData()
+ require.NoError(b, err)
b.ResetTimer()
- ct.generateCatchpointData(context.Background(), basics.Round(0), &catchpointGenerationStats)
+ ct.generateCatchpointData(context.Background(), basics.Round(0), &catchpointGenerationStats, encodedSPData)
b.StopTimer()
b.ReportMetric(float64(accountsNumber), "accounts")
}
@@ -471,7 +474,7 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) {
func TestCatchpointReproducibleLabels(t *testing.T) {
partitiontest.PartitionTest(t)
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
+ if runtime.GOARCH == "arm" {
t.Skip("This test is too slow on ARM and causes CI builds to time out")
}
@@ -594,7 +597,12 @@ func TestCatchpointReproducibleLabels(t *testing.T) {
ml2 := ledgerHistory[rnd]
require.NotNil(t, ml2)
- ct2 := newCatchpointTracker(t, ml2, cfg, ".")
+ cfg2 := cfg
// on every other iteration, modify CatchpointTracking to ensure label generation does not depend on catchpoint file creation
+ if rnd%2 == 0 {
+ cfg2.CatchpointTracking = int64(crypto.RandUint63())%2 + 1 //values 1 or 2
+ }
+ ct2 := newCatchpointTracker(t, ml2, cfg2, ".")
defer ct2.close()
for i := rnd + 1; i <= lastRound; i++ {
blk := bookkeeping.Block{
@@ -1331,20 +1339,20 @@ func TestCatchpointSecondStagePersistence(t *testing.T) {
err = os.WriteFile(catchpointDataFilePath, catchpointData, 0644)
require.NoError(t, err)
- cps2, err := ml2.dbs.MakeCatchpointReaderWriter()
+ cw2, err := ml2.dbs.MakeCatchpointWriter()
require.NoError(t, err)
// Restore the first stage database record.
- err = cps2.InsertOrReplaceCatchpointFirstStageInfo(context.Background(), firstStageRound, &firstStageInfo)
+ err = cw2.InsertOrReplaceCatchpointFirstStageInfo(context.Background(), firstStageRound, &firstStageInfo)
require.NoError(t, err)
// Insert unfinished catchpoint record.
- err = cps2.InsertUnfinishedCatchpoint(
+ err = cw2.InsertUnfinishedCatchpoint(
context.Background(), secondStageRound, crypto.Digest{})
require.NoError(t, err)
// Delete the catchpoint file database record.
- err = cps2.StoreCatchpoint(
+ err = cw2.StoreCatchpoint(
context.Background(), secondStageRound, "", "", 0)
require.NoError(t, err)
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index df8b72ba6..763c6f9a3 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -24,7 +24,6 @@ import (
"os"
"path/filepath"
- "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/ledger/encoded"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store/trackerdb"
@@ -53,7 +52,7 @@ const (
// has the option of throttling the CPU utilization in between the calls.
type catchpointWriter struct {
ctx context.Context
- tx trackerdb.TransactionScope
+ tx trackerdb.SnapshotScope
filePath string
totalAccounts uint64
totalKVs uint64
@@ -107,18 +106,18 @@ func (data catchpointStateProofVerificationContext) ToBeHashed() (protocol.HashI
return protocol.StateProofVerCtx, protocol.Encode(&data)
}
-func makeCatchpointWriter(ctx context.Context, filePath string, tx trackerdb.TransactionScope, maxResourcesPerChunk int) (*catchpointWriter, error) {
- arw, err := tx.MakeAccountsReaderWriter()
+func makeCatchpointWriter(ctx context.Context, filePath string, tx trackerdb.SnapshotScope, maxResourcesPerChunk int) (*catchpointWriter, error) {
+ aw, err := tx.MakeAccountsReader()
if err != nil {
return nil, err
}
- totalAccounts, err := arw.TotalAccounts(ctx)
+ totalAccounts, err := aw.TotalAccounts(ctx)
if err != nil {
return nil, err
}
- totalKVs, err := arw.TotalKVs(ctx)
+ totalKVs, err := aw.TotalKVs(ctx)
if err != nil {
return nil, err
}
@@ -160,35 +159,27 @@ func (cw *catchpointWriter) Abort() error {
return os.Remove(cw.filePath)
}
-func (cw *catchpointWriter) WriteStateProofVerificationContext() (crypto.Digest, error) {
- rawData, err := cw.tx.MakeSpVerificationCtxReaderWriter().GetAllSPContexts(cw.ctx)
- if err != nil {
- return crypto.Digest{}, err
- }
-
- wrappedData := catchpointStateProofVerificationContext{Data: rawData}
- dataHash, encodedData := crypto.EncodeAndHash(wrappedData)
-
- err = cw.tar.WriteHeader(&tar.Header{
+func (cw *catchpointWriter) WriteStateProofVerificationContext(encodedData []byte) error {
+ err := cw.tar.WriteHeader(&tar.Header{
Name: catchpointSPVerificationFileName,
Mode: 0600,
Size: int64(len(encodedData)),
})
if err != nil {
- return crypto.Digest{}, err
+ return err
}
_, err = cw.tar.Write(encodedData)
if err != nil {
- return crypto.Digest{}, err
+ return err
}
if chunkLen := uint64(len(encodedData)); cw.biggestChunkLen < chunkLen {
cw.biggestChunkLen = chunkLen
}
- return dataHash, nil
+ return nil
}
// WriteStep works for a short period of time (determined by stepCtx) to get
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index db54b4527..d985317a1 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -151,7 +151,13 @@ func verifyStateProofVerificationContextWrite(t *testing.T, data []ledgercore.St
if err != nil {
return err
}
- _, err = writer.WriteStateProofVerificationContext()
+ rawData, err := tx.MakeSpVerificationCtxReader().GetAllSPContexts(ctx)
+ if err != nil {
+ return err
+ }
+ _, encodedData := crypto.EncodeAndHash(catchpointStateProofVerificationContext{Data: rawData})
+
+ err = writer.WriteStateProofVerificationContext(encodedData)
if err != nil {
return err
}
@@ -260,7 +266,12 @@ func TestBasicCatchpointWriter(t *testing.T) {
if err != nil {
return err
}
- _, err = writer.WriteStateProofVerificationContext()
+ rawData, err := tx.MakeSpVerificationCtxReader().GetAllSPContexts(ctx)
+ if err != nil {
+ return err
+ }
+ _, encodedData := crypto.EncodeAndHash(catchpointStateProofVerificationContext{Data: rawData})
+ err = writer.WriteStateProofVerificationContext(encodedData)
if err != nil {
return err
}
@@ -284,7 +295,7 @@ func TestBasicCatchpointWriter(t *testing.T) {
require.Equal(t, uint64(len(accts)), uint64(len(chunk.Balances)))
}
-func testWriteCatchpoint(t *testing.T, rdb trackerdb.TrackerStore, datapath string, filepath string, maxResourcesPerChunk int) CatchpointFileHeader {
+func testWriteCatchpoint(t *testing.T, rdb trackerdb.Store, datapath string, filepath string, maxResourcesPerChunk int) CatchpointFileHeader {
var totalAccounts uint64
var totalChunks uint64
var biggestChunkLen uint64
@@ -300,11 +311,16 @@ func testWriteCatchpoint(t *testing.T, rdb trackerdb.TrackerStore, datapath stri
return err
}
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
+ if err != nil {
+ return err
+ }
+ rawData, err := tx.MakeSpVerificationCtxReader().GetAllSPContexts(ctx)
if err != nil {
return err
}
- _, err = writer.WriteStateProofVerificationContext()
+ _, encodedData := crypto.EncodeAndHash(catchpointStateProofVerificationContext{Data: rawData})
+ err = writer.WriteStateProofVerificationContext(encodedData)
if err != nil {
return err
}
@@ -318,11 +334,11 @@ func testWriteCatchpoint(t *testing.T, rdb trackerdb.TrackerStore, datapath stri
totalAccounts = writer.totalAccounts
totalChunks = writer.chunkNum
biggestChunkLen = writer.biggestChunkLen
- accountsRnd, err = arw.AccountsRound()
+ accountsRnd, err = ar.AccountsRound()
if err != nil {
return
}
- totals, err = arw.AccountsTotals(ctx, false)
+ totals, err = ar.AccountsTotals(ctx, false)
return
})
require.NoError(t, err)
@@ -420,12 +436,12 @@ func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) {
cw, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, maxResourcesPerChunk)
require.NoError(t, err)
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
if err != nil {
return err
}
- expectedTotalResources, err := arw.TotalResources(ctx)
+ expectedTotalResources, err := ar.TotalResources(ctx)
if err != nil {
return err
}
@@ -506,17 +522,17 @@ func TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) {
catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
err = ml.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
if err != nil {
return err
}
- expectedTotalAccounts, err := arw.TotalAccounts(ctx)
+ expectedTotalAccounts, err := ar.TotalAccounts(ctx)
if err != nil {
return err
}
- expectedTotalResources, err := arw.TotalResources(ctx)
+ expectedTotalResources, err := ar.TotalResources(ctx)
if err != nil {
return err
}
@@ -602,7 +618,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
ctx := context.Background()
err = l.trackerDBs.TransactionContext(ctx, func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
- arw, err := tx.MakeAccountsReaderWriter()
+ aw, err := tx.MakeAccountsWriter()
if err != nil {
return nil
}
@@ -618,7 +634,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
require.NotEmpty(t, h1)
// reset hashes
- err = arw.ResetAccountHashes(ctx)
+ err = aw.ResetAccountHashes(ctx)
require.NoError(t, err)
// rebuild the MT
@@ -664,7 +680,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
require.NoError(t, err)
}
-func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess trackerdb.TrackerStore, filepath string) *Ledger {
+func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess trackerdb.Store, filepath string) *Ledger {
// create a ledger.
var initState ledgercore.InitState
initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
@@ -686,7 +702,7 @@ func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess tracke
err = accessor.BuildMerkleTrie(context.Background(), nil)
require.NoError(t, err)
- err = l.trackerDBs.Batch(func(ctx context.Context, tx trackerdb.BatchScope) error {
+ err = l.trackerDBs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
cw, err := tx.MakeCatchpointWriter()
if err != nil {
return err
@@ -696,7 +712,7 @@ func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess tracke
})
require.NoError(t, err)
- balanceTrieStats := func(db trackerdb.TrackerStore) merkletrie.Stats {
+ balanceTrieStats := func(db trackerdb.Store) merkletrie.Stats {
var stats merkletrie.Stats
err = db.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
committer, err := tx.MakeMerkleCommitter(false)
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index ec33f2ac5..b16d3a8fb 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -67,6 +67,9 @@ type CatchpointCatchupAccessor interface {
// GetCatchupBlockRound returns the latest block round matching the current catchpoint
GetCatchupBlockRound(ctx context.Context) (round basics.Round, err error)
+ // GetVerifyData returns the balances hash, spver hash and totals used by VerifyCatchpoint
+ GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error)
+
// VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label.
VerifyCatchpoint(ctx context.Context, blk *bookkeeping.Block) (err error)
@@ -103,7 +106,7 @@ type stagingWriter interface {
}
type stagingWriterImpl struct {
- wdb trackerdb.TrackerStore
+ wdb trackerdb.Store
}
func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []trackerdb.NormalizedAccountBalance) error {
@@ -304,7 +307,7 @@ func (c *catchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context
}
start := time.Now()
ledgerResetstagingbalancesCount.Inc(nil)
- err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) {
+ err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
crw, err := tx.MakeCatchpointWriter()
if err != nil {
return err
@@ -423,7 +426,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex
// TotalAccounts, TotalAccounts, Catchpoint, BlockHeaderDigest, BalancesRound
start := time.Now()
ledgerProcessstagingcontentCount.Inc(nil)
- err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) {
+ err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
cw, err := tx.MakeCatchpointWriter()
if err != nil {
return err
@@ -603,9 +606,9 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
wg.Add(1)
go func() {
defer wg.Done()
- start := time.Now()
+ writeBalancesStart := time.Now()
errBalances = c.stagingWriter.writeBalances(ctx, normalizedAccountBalances)
- durBalances = time.Since(start)
+ durBalances = time.Since(writeBalancesStart)
}()
// on a in-memory database, wait for the writer to finish before starting the new writer
@@ -627,9 +630,9 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
}
}
if hasCreatables {
- start := time.Now()
+ writeCreatablesStart := time.Now()
errCreatables = c.stagingWriter.writeCreatables(ctx, normalizedAccountBalances)
- durCreatables = time.Since(start)
+ durCreatables = time.Since(writeCreatablesStart)
}
}()
@@ -642,9 +645,9 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
wg.Add(1)
go func() {
defer wg.Done()
- start := time.Now()
+ writeHashesStart := time.Now()
errHashes = c.stagingWriter.writeHashes(ctx, normalizedAccountBalances)
- durHashes = time.Since(start)
+ durHashes = time.Since(writeHashesStart)
}()
// on a in-memory database, wait for the writer to finish before starting the new writer
@@ -657,9 +660,9 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
go func() {
defer wg.Done()
- start := time.Now()
+ writeKVsStart := time.Now()
errKVs = c.stagingWriter.writeKVs(ctx, chunkKVs)
- durKVs = time.Since(start)
+ durKVs = time.Since(writeKVsStart)
}()
wg.Wait()
@@ -723,7 +726,7 @@ func countHashes(hashes [][]byte) (accountCount, kvCount uint64) {
// BuildMerkleTrie would process the catchpointpendinghashes and insert all the items in it into the merkle trie
func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, progressUpdates func(uint64, uint64)) (err error) {
dbs := c.ledger.trackerDB()
- err = dbs.Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) {
+ err = dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
crw, err := tx.MakeCatchpointWriter()
if err != nil {
return err
@@ -754,7 +757,8 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
defer wg.Done()
defer close(writerQueue)
- err := dbs.Snapshot(func(transactionCtx context.Context, tx trackerdb.SnapshotScope) (err error) {
+ // Note: this needs to be accessed on a snapshot to guarantee concurrent read-only access to the sqlite db
+ dbErr := dbs.Snapshot(func(transactionCtx context.Context, tx trackerdb.SnapshotScope) (err error) {
it := tx.MakeCatchpointPendingHashesIterator(trieRebuildAccountChunkSize)
var hashes [][]byte
for {
@@ -778,8 +782,8 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
db.ResetTransactionWarnDeadline(transactionCtx, tx, time.Now().Add(5*time.Second))
return err
})
- if err != nil {
- errChan <- err
+ if dbErr != nil {
+ errChan <- dbErr
}
}()
@@ -792,7 +796,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
accountHashesWritten, kvHashesWritten := uint64(0), uint64(0)
var mc trackerdb.MerkleCommitter
- err := dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) {
+ txErr := dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) {
// create the merkle trie for the balances
mc, err = tx.MakeMerkleCommitter(true)
if err != nil {
@@ -802,8 +806,8 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
trie, err = merkletrie.MakeTrie(mc, trackerdb.TrieMemoryConfig)
return err
})
- if err != nil {
- errChan <- err
+ if txErr != nil {
+ errChan <- txErr
return
}
@@ -821,7 +825,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
continue
}
- err = dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) {
+ txErr = dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) {
mc, err = tx.MakeMerkleCommitter(true)
if err != nil {
return
@@ -846,12 +850,12 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
return nil
})
- if err != nil {
+ if txErr != nil {
break
}
if uncommitedHashesCount >= trieRebuildCommitFrequency {
- err = dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) {
+ txErr = dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) {
// set a long 30-second window for the evict before warning is generated.
_, err = tx.ResetTransactionWarnDeadline(transactionCtx, time.Now().Add(30*time.Second))
if err != nil {
@@ -869,7 +873,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
uncommitedHashesCount = 0
return nil
})
- if err != nil {
+ if txErr != nil {
keepWriting = false
continue
}
@@ -879,12 +883,12 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
progressUpdates(accountHashesWritten, kvHashesWritten)
}
}
- if err != nil {
- errChan <- err
+ if txErr != nil {
+ errChan <- txErr
return
}
if uncommitedHashesCount > 0 {
- err = dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) {
+ txErr = dbs.Transaction(func(transactionCtx context.Context, tx trackerdb.TransactionScope) (err error) {
// set a long 30-second window for the evict before warning is generated.
_, err = tx.ResetTransactionWarnDeadline(transactionCtx, time.Now().Add(30*time.Second))
if err != nil {
@@ -900,8 +904,8 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
})
}
- if err != nil {
- errChan <- err
+ if txErr != nil {
+ errChan <- txErr
}
}()
@@ -926,36 +930,11 @@ func (c *catchpointCatchupAccessorImpl) GetCatchupBlockRound(ctx context.Context
return basics.Round(iRound), nil
}
-// VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label.
-func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, blk *bookkeeping.Block) (err error) {
- var balancesHash crypto.Digest
+func (c *catchpointCatchupAccessorImpl) GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error) {
var rawStateProofVerificationContext []ledgercore.StateProofVerificationContext
- var blockRound basics.Round
- var totals ledgercore.AccountTotals
- var catchpointLabel string
- var version uint64
-
- catchpointLabel, err = c.catchpointStore.ReadCatchpointStateString(ctx, trackerdb.CatchpointStateCatchupLabel)
- if err != nil {
- return fmt.Errorf("unable to read catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupLabel, err)
- }
-
- version, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupVersion)
- if err != nil {
- return fmt.Errorf("unable to retrieve catchpoint version: %v", err)
- }
- var iRound uint64
- iRound, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBlockRound)
- if err != nil {
- return fmt.Errorf("unable to read catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupBlockRound, err)
- }
- blockRound = basics.Round(iRound)
-
- start := time.Now()
- ledgerVerifycatchpointCount.Inc(nil)
err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
if err != nil {
return err
}
@@ -976,18 +955,54 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl
return fmt.Errorf("unable to get trie root hash: %v", err)
}
- totals, err = arw.AccountsTotals(ctx, true)
+ totals, err = ar.AccountsTotals(ctx, true)
if err != nil {
return fmt.Errorf("unable to get accounts totals: %v", err)
}
- rawStateProofVerificationContext, err = tx.MakeSpVerificationCtxReaderWriter().GetAllSPContextsFromCatchpointTbl(ctx)
+ rawStateProofVerificationContext, err = tx.MakeSpVerificationCtxReader().GetAllSPContextsFromCatchpointTbl(ctx)
if err != nil {
return fmt.Errorf("unable to get state proof verification data: %v", err)
}
return
})
+ if err != nil {
+ return crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, err
+ }
+
+ wrappedContext := catchpointStateProofVerificationContext{Data: rawStateProofVerificationContext}
+ spverHash = crypto.HashObj(wrappedContext)
+
+ return balancesHash, spverHash, totals, err
+}
+
+// VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label.
+func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, blk *bookkeeping.Block) (err error) {
+ var blockRound basics.Round
+ var catchpointLabel string
+ var version uint64
+
+ catchpointLabel, err = c.catchpointStore.ReadCatchpointStateString(ctx, trackerdb.CatchpointStateCatchupLabel)
+ if err != nil {
+ return fmt.Errorf("unable to read catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupLabel, err)
+ }
+
+ version, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupVersion)
+ if err != nil {
+ return fmt.Errorf("unable to retrieve catchpoint version: %v", err)
+ }
+
+ var iRound uint64
+ iRound, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBlockRound)
+ if err != nil {
+ return fmt.Errorf("unable to read catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupBlockRound, err)
+ }
+ blockRound = basics.Round(iRound)
+
+ start := time.Now()
+ ledgerVerifycatchpointCount.Inc(nil)
+ balancesHash, spVerificationHash, totals, err := c.GetVerifyData(ctx)
ledgerVerifycatchpointMicros.AddMicrosecondsSince(start, nil)
if err != nil {
return err
@@ -996,9 +1011,6 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl
return fmt.Errorf("block round in block header doesn't match block round in catchpoint: %d != %d", blockRound, blk.Round())
}
- wrappedContext := catchpointStateProofVerificationContext{Data: rawStateProofVerificationContext}
- spVerificationHash := crypto.HashObj(wrappedContext)
-
var catchpointLabelMaker ledgercore.CatchpointLabelMaker
blockDigest := blk.Digest()
if version <= CatchpointFileVersionV6 {
@@ -1026,7 +1038,7 @@ func (c *catchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context,
balancesRound := blk.Round() - basics.Round(catchpointLookback)
start := time.Now()
ledgerStorebalancesroundCount.Inc(nil)
- err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) {
+ err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
crw, err := tx.MakeCatchpointWriter()
if err != nil {
return err
@@ -1132,7 +1144,12 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
return err
}
- arw, err := tx.MakeAccountsReaderWriter()
+ ar, err := tx.MakeAccountsReader()
+ if err != nil {
+ return err
+ }
+
+ aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
}
@@ -1150,13 +1167,13 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
return err
}
- totals, err = arw.AccountsTotals(ctx, true)
+ totals, err = ar.AccountsTotals(ctx, true)
if err != nil {
return err
}
if hashRound == 0 {
- err = arw.ResetAccountHashes(ctx)
+ err = aw.ResetAccountHashes(ctx)
if err != nil {
return err
}
@@ -1168,7 +1185,7 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
// it might be necessary to restore it into the latest database version. To do that, one
// will need to run the 6->7 migration code manually here or in a similar function to create
// onlineaccounts and other V7 tables.
- err = arw.AccountsReset(ctx)
+ err = aw.AccountsReset(ctx)
if err != nil {
return err
}
@@ -1193,7 +1210,7 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
return err
}
- err = arw.AccountsPutTotals(totals, false)
+ err = aw.AccountsPutTotals(totals, false)
if err != nil {
return err
}
diff --git a/ledger/encoded/msgp_gen.go b/ledger/encoded/msgp_gen.go
index 189a6b73b..cd9c99aaa 100644
--- a/ledger/encoded/msgp_gen.go
+++ b/ledger/encoded/msgp_gen.go
@@ -6,6 +6,8 @@ import (
"sort"
"github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/data/basics"
)
// The following msgp objects are implemented in this file:
@@ -16,6 +18,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> BalanceRecordV5MaxSize()
//
// BalanceRecordV6
// |-----> (*) MarshalMsg
@@ -24,6 +27,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> BalanceRecordV6MaxSize()
//
// KVRecordV6
// |-----> (*) MarshalMsg
@@ -32,6 +36,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> KVRecordV6MaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -163,6 +168,13 @@ func (z *BalanceRecordV5) MsgIsZero() bool {
return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func BalanceRecordV5MaxSize() (s int) {
+ s = 1 + 3 + basics.AddressMaxSize() + 3
+ panic("Unable to determine max size: MaxSize() not implemented for Raw type")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *BalanceRecordV6) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -417,6 +429,21 @@ func (z *BalanceRecordV6) MsgIsZero() bool {
return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero()) && (len((*z).Resources) == 0) && ((*z).ExpectingMoreEntries == false)
}
+// MaxSize returns a maximum valid message size for this message type
+func BalanceRecordV6MaxSize() (s int) {
+ s = 1 + 2 + basics.AddressMaxSize() + 2
+ panic("Unable to determine max size: MaxSize() not implemented for Raw type")
+ s += 2
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.Resources
+ s += resourcesPerCatchpointFileChunkBackwardCompatible * (msgp.Uint64Size)
+ // Adding size of map values for z.Resources
+ s += resourcesPerCatchpointFileChunkBackwardCompatible
+ panic("Unable to determine max size: MaxSize() not implemented for Raw type")
+ s += 2 + msgp.BoolSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *KVRecordV6) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -585,3 +612,9 @@ func (z *KVRecordV6) Msgsize() (s int) {
func (z *KVRecordV6) MsgIsZero() bool {
return (len((*z).Key) == 0) && (len((*z).Value) == 0)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func KVRecordV6MaxSize() (s int) {
+ s = 1 + 2 + msgp.BytesPrefixSize + KVRecordV6MaxKeyLength + 2 + msgp.BytesPrefixSize + KVRecordV6MaxValueLength
+ return
+}
diff --git a/ledger/eval/cow.go b/ledger/eval/cow.go
index c58f65fc5..8019dc69f 100644
--- a/ledger/eval/cow.go
+++ b/ledger/eval/cow.go
@@ -27,6 +27,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
+ "golang.org/x/exp/maps"
)
// ___________________
@@ -336,13 +337,9 @@ func (cb *roundCowState) reset() {
cb.proto = config.ConsensusParams{}
cb.mods.Reset()
cb.txnCount = 0
- for addr := range cb.sdeltas {
- delete(cb.sdeltas, addr)
- }
+ maps.Clear(cb.sdeltas)
cb.compatibilityMode = false
- for addr := range cb.compatibilityGetKeyCache {
- delete(cb.compatibilityGetKeyCache, addr)
- }
+ maps.Clear(cb.compatibilityGetKeyCache)
cb.prevTotals = ledgercore.AccountTotals{}
}
diff --git a/ledger/eval/cow_test.go b/ledger/eval/cow_test.go
index df33df168..225b03799 100644
--- a/ledger/eval/cow_test.go
+++ b/ledger/eval/cow_test.go
@@ -104,7 +104,7 @@ func (ml *mockLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error
}
func (ml *mockLedger) blockHdrCached(rnd basics.Round) (bookkeeping.BlockHeader, error) {
- return ml.blockHdrCached(rnd)
+ return ml.BlockHdr(rnd)
}
func (ml *mockLedger) GetStateProofVerificationContext(rnd basics.Round) (*ledgercore.StateProofVerificationContext, error) {
diff --git a/ledger/eval/eval.go b/ledger/eval/eval.go
index 0607f5a38..fcebc8ffe 100644
--- a/ledger/eval/eval.go
+++ b/ledger/eval/eval.go
@@ -157,10 +157,10 @@ func makeRoundCowBase(l LedgerForCowBase, rnd basics.Round, txnCount uint64, sta
}
func (x *roundCowBase) getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
- creatable := creatable{cindex: cidx, ctype: ctype}
+ c := creatable{cindex: cidx, ctype: ctype}
- if foundAddress, ok := x.creators[creatable]; ok {
- return foundAddress.address, foundAddress.exists, nil
+ if fa, ok := x.creators[c]; ok {
+ return fa.address, fa.exists, nil
}
address, exists, err := x.l.GetCreatorForRound(x.rnd, cidx, ctype)
@@ -169,7 +169,7 @@ func (x *roundCowBase) getCreator(cidx basics.CreatableIndex, ctype basics.Creat
"roundCowBase.getCreator() cidx: %d ctype: %v err: %w", cidx, ctype, err)
}
- x.creators[creatable] = foundAddress{address: address, exists: exists}
+ x.creators[c] = foundAddress{address: address, exists: exists}
return address, exists, nil
}
@@ -734,9 +734,9 @@ func StartEvaluator(l LedgerForEvaluator, hdr bookkeeping.BlockHeader, evalOpts
eval.state = makeRoundCowState(base, eval.block.BlockHeader, proto, eval.prevHeader.TimeStamp, prevTotals, evalOpts.PaysetHint)
if evalOpts.Validate {
- err := eval.block.BlockHeader.PreCheck(eval.prevHeader)
- if err != nil {
- return nil, err
+ preCheckErr := eval.block.BlockHeader.PreCheck(eval.prevHeader)
+ if preCheckErr != nil {
+ return nil, preCheckErr
}
// Check that the rewards rate, level and residue match expected values
@@ -1097,16 +1097,16 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams *
}
// Transaction already in the ledger?
- err := cow.checkDup(txn.Txn.First(), txn.Txn.Last(), txid, ledgercore.Txlease{Sender: txn.Txn.Sender, Lease: txn.Txn.Lease})
+ err = cow.checkDup(txn.Txn.First(), txn.Txn.Last(), txid, ledgercore.Txlease{Sender: txn.Txn.Sender, Lease: txn.Txn.Lease})
if err != nil {
return err
}
// Does the address that authorized the transaction actually match whatever address the sender has rekeyed to?
// i.e., the sig/lsig/msig was checked against the txn.Authorizer() address, but does this match the sender's balrecord.AuthAddr?
- acctdata, err := cow.lookup(txn.Txn.Sender)
- if err != nil {
- return err
+ acctdata, lookupErr := cow.lookup(txn.Txn.Sender)
+ if lookupErr != nil {
+ return lookupErr
}
correctAuthorizer := acctdata.AuthAddr
if (correctAuthorizer == basics.Address{}) {
@@ -1300,9 +1300,9 @@ func (eval *BlockEvaluator) endOfBlock() error {
if eval.validate {
// check commitments
- txnRoot, err := eval.block.PaysetCommit()
- if err != nil {
- return err
+ txnRoot, err2 := eval.block.PaysetCommit()
+ if err2 != nil {
+ return err2
}
if txnRoot != eval.block.TxnCommitments {
return fmt.Errorf("txn root wrong: %v != %v", txnRoot, eval.block.TxnCommitments)
@@ -1316,9 +1316,9 @@ func (eval *BlockEvaluator) endOfBlock() error {
return fmt.Errorf("txn count wrong: %d != %d", eval.block.TxnCounter, expectedTxnCount)
}
- expectedVoters, expectedVotersWeight, err := eval.stateProofVotersAndTotal()
- if err != nil {
- return err
+ expectedVoters, expectedVotersWeight, err2 := eval.stateProofVotersAndTotal()
+ if err2 != nil {
+ return err2
}
if !eval.block.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment.IsEqual(expectedVoters) {
return fmt.Errorf("StateProofVotersCommitment wrong: %v != %v", eval.block.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment, expectedVoters)
@@ -1700,10 +1700,10 @@ transactionGroupLoop:
}
case <-ctx.Done():
return ledgercore.StateDelta{}, ctx.Err()
- case err, open := <-txvalidator.done:
+ case doneErr, open := <-txvalidator.done:
// if we're not validating, then `txvalidator.done` would be nil, in which case this case statement would never be executed.
- if open && err != nil {
- return ledgercore.StateDelta{}, err
+ if open && doneErr != nil {
+ return ledgercore.StateDelta{}, doneErr
}
}
}
diff --git a/ledger/eval/eval_test.go b/ledger/eval/eval_test.go
index 08a8b8c82..783f975bb 100644
--- a/ledger/eval/eval_test.go
+++ b/ledger/eval/eval_test.go
@@ -941,7 +941,7 @@ func (ledger *evalTestLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeade
}
func (ledger *evalTestLedger) BlockHdrCached(rnd basics.Round) (bookkeeping.BlockHeader, error) {
- return ledger.BlockHdrCached(rnd)
+ return ledger.BlockHdr(rnd)
}
func (ledger *evalTestLedger) VotersForStateProof(rnd basics.Round) (*ledgercore.VotersForRound, error) {
@@ -1042,7 +1042,7 @@ func (l *testCowBaseLedger) BlockHdr(basics.Round) (bookkeeping.BlockHeader, err
}
func (l *testCowBaseLedger) BlockHdrCached(rnd basics.Round) (bookkeeping.BlockHeader, error) {
- return l.BlockHdrCached(rnd)
+ return l.BlockHdr(rnd)
}
func (l *testCowBaseLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
diff --git a/ledger/eval/prefetcher/prefetcher.go b/ledger/eval/prefetcher/prefetcher.go
index e00d78f70..40702b008 100644
--- a/ledger/eval/prefetcher/prefetcher.go
+++ b/ledger/eval/prefetcher/prefetcher.go
@@ -200,16 +200,16 @@ func loadAccountsAddAccountTask(addr *basics.Address, wt *groupTask, accountTask
return
}
if task, have := accountTasks[*addr]; !have {
- task := &preloaderTask{
+ newTask := &preloaderTask{
address: addr,
groupTasks: make([]*groupTask, 1, 4),
groupTasksIndices: make([]int, 1, 4),
}
- task.groupTasks[0] = wt
- task.groupTasksIndices[0] = wt.balancesCount
+ newTask.groupTasks[0] = wt
+ newTask.groupTasksIndices[0] = wt.balancesCount
- accountTasks[*addr] = task
- queue.enqueue(task)
+ accountTasks[*addr] = newTask
+ queue.enqueue(newTask)
} else {
task.groupTasks = append(task.groupTasks, wt)
task.groupTasksIndices = append(task.groupTasksIndices, wt.balancesCount)
@@ -228,18 +228,18 @@ func loadAccountsAddResourceTask(addr *basics.Address, cidx basics.CreatableInde
key.address = *addr
}
if task, have := resourceTasks[key]; !have {
- task := &preloaderTask{
+ newTask := &preloaderTask{
address: addr,
groupTasks: make([]*groupTask, 1, 4),
groupTasksIndices: make([]int, 1, 4),
creatableIndex: cidx,
creatableType: ctype,
}
- task.groupTasks[0] = wt
- task.groupTasksIndices[0] = wt.resourcesCount
+ newTask.groupTasks[0] = wt
+ newTask.groupTasksIndices[0] = wt.resourcesCount
- resourceTasks[key] = task
- queue.enqueue(task)
+ resourceTasks[key] = newTask
+ queue.enqueue(newTask)
} else {
task.groupTasks = append(task.groupTasks, wt)
task.groupTasksIndices = append(task.groupTasksIndices, wt.resourcesCount)
diff --git a/ledger/eval/txntracer.go b/ledger/eval/txntracer.go
index 036ad773d..f4c6277c7 100644
--- a/ledger/eval/txntracer.go
+++ b/ledger/eval/txntracer.go
@@ -20,6 +20,8 @@ import (
"fmt"
"github.com/algorand/go-deadlock"
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
@@ -51,36 +53,22 @@ func convertStateDelta(delta ledgercore.StateDelta) StateDeltaSubset {
// The StateDelta object returned through the EvalTracer has its values deleted between txn groups to avoid
// reallocation during evaluation.
// This means the map values need to be copied (to avoid deletion) since they are all passed by reference.
- kvmods := make(map[string]ledgercore.KvValueDelta, len(delta.KvMods))
- for k1, v1 := range delta.KvMods {
- kvmods[k1] = v1
- }
- txids := make(map[transactions.Txid]ledgercore.IncludedTransactions, len(delta.Txids))
- for k2, v2 := range delta.Txids {
- txids[k2] = v2
- }
- txleases := make(map[ledgercore.Txlease]basics.Round, len(delta.Txleases))
- for k3, v3 := range delta.Txleases {
- txleases[k3] = v3
- }
- creatables := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable, len(delta.Creatables))
- for k4, v4 := range delta.Creatables {
- creatables[k4] = v4
- }
+ kvmods := maps.Clone(delta.KvMods)
+ txids := maps.Clone(delta.Txids)
+ txleases := maps.Clone(delta.Txleases)
+ creatables := maps.Clone(delta.Creatables)
+
var accR []ledgercore.BalanceRecord
var appR []ledgercore.AppResourceRecord
var assetR []ledgercore.AssetResourceRecord
if len(delta.Accts.Accts) > 0 {
- accR = make([]ledgercore.BalanceRecord, len(delta.Accts.Accts))
- copy(accR, delta.Accts.Accts)
+ accR = slices.Clone(delta.Accts.Accts)
}
if len(delta.Accts.AppResources) > 0 {
- appR = make([]ledgercore.AppResourceRecord, len(delta.Accts.AppResources))
- copy(appR, delta.Accts.AppResources)
+ appR = slices.Clone(delta.Accts.AppResources)
}
if len(delta.Accts.AssetResources) > 0 {
- assetR = make([]ledgercore.AssetResourceRecord, len(delta.Accts.AssetResources))
- copy(assetR, delta.Accts.AssetResources)
+ assetR = slices.Clone(delta.Accts.AssetResources)
}
return StateDeltaSubset{
Accts: ledgercore.AccountDeltas{
diff --git a/ledger/evalbench_test.go b/ledger/evalbench_test.go
index 513449727..b0c3b6780 100644
--- a/ledger/evalbench_test.go
+++ b/ledger/evalbench_test.go
@@ -309,7 +309,7 @@ func BenchmarkBlockEvaluatorDiskAppCalls(b *testing.B) {
// the setup time for this test is 1.5 minutes long. By setting the b.N = 2, we
// set up for success on the first iteration, and preventing a second iteration.
if b.N < 2 {
- b.N = 2
+ b.N = 2 //nolint:staticcheck // intentionally setting b.N
}
// program sets all 16 available keys of len 64 bytes to same values of 64 bytes
source := `#pragma version 5
diff --git a/ledger/ledger.go b/ledger/ledger.go
index 0c7747087..dd54529ee 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -51,7 +51,7 @@ type Ledger struct {
// Database connections to the DBs storing blocks and tracker state.
// We use potentially different databases to avoid SQLite contention
// during catchup.
- trackerDBs trackerdb.TrackerStore
+ trackerDBs trackerdb.Store
blockDBs db.Pair
// blockQ is the buffer of added blocks that will be flushed to
@@ -144,14 +144,11 @@ func OpenLedger(
}
}()
- l.trackerDBs, l.blockDBs, err = openLedgerDB(dbPathPrefix, dbMem)
+ l.trackerDBs, l.blockDBs, err = openLedgerDB(dbPathPrefix, dbMem, cfg, log)
if err != nil {
err = fmt.Errorf("OpenLedger.openLedgerDB %v", err)
return nil, err
}
- l.trackerDBs.SetLogger(log)
- l.blockDBs.Rdb.SetLogger(log)
- l.blockDBs.Wdb.SetLogger(log)
l.setSynchronousMode(context.Background(), l.synchronousMode)
@@ -284,12 +281,9 @@ func (l *Ledger) verifyMatchingGenesisHash() (err error) {
return
}
-func openLedgerDB(dbPathPrefix string, dbMem bool) (trackerDBs trackerdb.TrackerStore, blockDBs db.Pair, err error) {
+func openLedgerDB(dbPathPrefix string, dbMem bool, cfg config.Local, log logging.Logger) (trackerDBs trackerdb.Store, blockDBs db.Pair, err error) {
// Backwards compatibility: we used to store both blocks and tracker
// state in a single SQLite db file.
- var trackerDBFilename string
- var blockDBFilename string
-
if !dbMem {
commonDBFilename := dbPathPrefix + ".sqlite"
_, err = os.Stat(commonDBFilename)
@@ -302,20 +296,32 @@ func openLedgerDB(dbPathPrefix string, dbMem bool) (trackerDBs trackerdb.Tracker
}
}
- trackerDBFilename = dbPathPrefix + ".tracker.sqlite"
- blockDBFilename = dbPathPrefix + ".block.sqlite"
-
outErr := make(chan error, 2)
go func() {
var lerr error
- trackerDBs, lerr = sqlitedriver.OpenTrackerSQLStore(trackerDBFilename, dbMem)
+ switch cfg.StorageEngine {
+ case "sqlite":
+ fallthrough
+ // anything else will initialize a sqlite engine.
+ default:
+ file := dbPathPrefix + ".tracker.sqlite"
+ trackerDBs, lerr = sqlitedriver.Open(file, dbMem, log)
+ }
+
outErr <- lerr
}()
go func() {
var lerr error
+ blockDBFilename := dbPathPrefix + ".block.sqlite"
blockDBs, lerr = db.OpenPair(blockDBFilename, dbMem)
- outErr <- lerr
+ if lerr != nil {
+ outErr <- lerr
+ return
+ }
+ blockDBs.Rdb.SetLogger(log)
+ blockDBs.Wdb.SetLogger(log)
+ outErr <- nil
}()
err = <-outErr
@@ -496,24 +502,6 @@ func (l *Ledger) GetStateProofVerificationContext(stateProofLastAttestedRound ba
return l.spVerification.LookupVerificationContext(stateProofLastAttestedRound)
}
-// ListAssets takes a maximum asset index and maximum result length, and
-// returns up to that many CreatableLocators from the database where app idx is
-// less than or equal to the maximum.
-func (l *Ledger) ListAssets(maxAssetIdx basics.AssetIndex, maxResults uint64) (results []basics.CreatableLocator, err error) {
- l.trackerMu.RLock()
- defer l.trackerMu.RUnlock()
- return l.accts.ListAssets(maxAssetIdx, maxResults)
-}
-
-// ListApplications takes a maximum app index and maximum result length, and
-// returns up to that many CreatableLocators from the database where app idx is
-// less than or equal to the maximum.
-func (l *Ledger) ListApplications(maxAppIdx basics.AppIndex, maxResults uint64) (results []basics.CreatableLocator, err error) {
- l.trackerMu.RLock()
- defer l.trackerMu.RUnlock()
- return l.accts.ListApplications(maxAppIdx, maxResults)
-}
-
// LookupLatest uses the accounts tracker to return the account state (including
// resources) for a given address, for the latest round. The returned account values
// reflect the changes of all blocks up to and including the returned round number.
@@ -805,7 +793,7 @@ func (l *Ledger) GetCatchpointStream(round basics.Round) (ReadCloseSizer, error)
}
// ledgerForTracker methods
-func (l *Ledger) trackerDB() trackerdb.TrackerStore {
+func (l *Ledger) trackerDB() trackerdb.Store {
return l.trackerDBs
}
@@ -848,15 +836,19 @@ func (l *Ledger) VerifiedTransactionCache() verify.VerifiedTransactionCache {
// If a value of zero or less is passed to maxTxnBytesPerBlock, the consensus MaxTxnBytesPerBlock would
// be used instead.
// The tracer argument is a logic.EvalTracer which will be attached to the evaluator and have its hooked invoked during
-// the eval process for each block. A nil tracer will skip tracer invocation entirely.
+// the eval process for each block. A nil tracer will default to the tracer attached to the ledger.
func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnBytesPerBlock int, tracer logic.EvalTracer) (*eval.BlockEvaluator, error) {
+ tracerForEval := tracer
+ if tracerForEval == nil {
+ tracerForEval = l.tracer
+ }
return eval.StartEvaluator(l, hdr,
eval.EvaluatorOptions{
PaysetHint: paysetHint,
Generate: true,
Validate: true,
MaxTxnBytesPerBlock: maxTxnBytesPerBlock,
- Tracer: tracer,
+ Tracer: tracerForEval,
})
}
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 5b7ceb33c..f7a847a11 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -30,6 +30,7 @@ import (
"time"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
@@ -696,8 +697,7 @@ func TestLedgerSingleTxV24(t *testing.T) {
appIdx = 2 // the second successful txn
badTx = correctAppCreate
- program := make([]byte, len(approvalProgram))
- copy(program, approvalProgram)
+ program := slices.Clone(approvalProgram)
program[0] = '\x01'
badTx.ApprovalProgram = program
err = l.appendUnvalidatedTx(t, initAccounts, initSecrets, badTx, ad)
@@ -1623,71 +1623,6 @@ func generateCreatables(numElementsPerSegement int) (
return
}
-// TestListAssetsAndApplications tests the ledger.ListAssets and ledger.ListApplications
-// interfaces. The detailed test on the correctness of these functions is given in:
-// TestListCreatables (acctupdates_test.go)
-func TestListAssetsAndApplications(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- numElementsPerSegement := 10 // This is multiplied by 10. see randomCreatables
-
- //initLedger
- genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
- const inMem = true
- log := logging.TestingLog(t)
- cfg := config.GetDefaultLocal()
- cfg.Archival = true
- ledger, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg)
- require.NoError(t, err, "could not open ledger")
- defer ledger.Close()
-
- // ******* All results are obtained from the cache. Empty database *******
- // ******* No deletes *******
- // get random data. Initial batch, no deletes
- randomCtbs, maxAsset, maxApp, err := generateCreatables(numElementsPerSegement)
- require.NoError(t, err)
-
- // set the cache
- ledger.accts.creatables = randomCtbs
-
- // Test ListAssets
- // Check the number of results limit
- results, err := ledger.ListAssets(basics.AssetIndex(maxAsset), 2)
- require.NoError(t, err)
- require.Equal(t, 2, len(results))
- // Check the max asset id limit
- results, err = ledger.ListAssets(basics.AssetIndex(maxAsset), 100)
- require.NoError(t, err)
- assetCount := 0
- for id, ctb := range randomCtbs {
- if ctb.Ctype == basics.AssetCreatable &&
- ctb.Created &&
- id <= maxAsset {
- assetCount++
- }
- }
- require.Equal(t, assetCount, len(results))
-
- // Test ListApplications
- // Check the number of results limit
- ledger.accts.creatables = randomCtbs
- results, err = ledger.ListApplications(basics.AppIndex(maxApp), 2)
- require.NoError(t, err)
- require.Equal(t, 2, len(results))
- // Check the max application id limit
- results, err = ledger.ListApplications(basics.AppIndex(maxApp), 100)
- require.NoError(t, err)
- appCount := 0
- for id, ctb := range randomCtbs {
- if ctb.Ctype == basics.AppCreatable &&
- ctb.Created &&
- id <= maxApp {
- appCount++
- }
- }
- require.Equal(t, appCount, len(results))
-}
-
// TestLedgerVerifiesOldStateProofs test that if stateproof chain is delayed for X intervals (pass StateProofMaxRecoveryIntervals),
// The ledger will still be able to verify the state proof - i.e the ledger has the necessary data to verify it.
func TestLedgerVerifiesOldStateProofs(t *testing.T) {
@@ -2277,7 +2212,7 @@ func TestLedgerReloadTxTailHistoryAccess(t *testing.T) {
// reset tables and re-init again, similary to the catchpount apply code
// since the ledger has only genesis accounts, this recreates them
- err = l.trackerDBs.Batch(func(ctx context.Context, tx trackerdb.BatchScope) error {
+ err = l.trackerDBs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
arw, err := tx.MakeAccountsWriter()
if err != nil {
return err
@@ -2296,7 +2231,7 @@ func TestLedgerReloadTxTailHistoryAccess(t *testing.T) {
DbPathPrefix: l.catchpoint.dbDirectory,
BlockDb: l.blockDBs,
}
- _, err0 = tx.Testing().RunMigrations(ctx, tp, l.log, preReleaseDBVersion /*target database version*/)
+ _, err0 = tx.RunMigrations(ctx, tp, l.log, preReleaseDBVersion /*target database version*/)
if err0 != nil {
return err0
}
@@ -2439,7 +2374,7 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
cfg.MaxAcctLookback = proto.MaxBalLookback
log := logging.TestingLog(t)
log.SetLevel(logging.Info) // prevent spamming with ledger.AddValidatedBlock debug message
- trackerDB, blockDB, err := openLedgerDB(dbName, inMem)
+ trackerDB, blockDB, err := openLedgerDB(dbName, inMem, cfg, log)
require.NoError(t, err)
defer func() {
trackerDB.Close()
@@ -2447,10 +2382,7 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
}()
// create tables so online accounts can still be written
err = trackerDB.Batch(func(ctx context.Context, tx trackerdb.BatchScope) error {
- if err := tx.Testing().AccountsUpdateSchemaTest(ctx); err != nil {
- return err
- }
- return nil
+ return tx.Testing().AccountsUpdateSchemaTest(ctx)
})
require.NoError(t, err)
@@ -3013,7 +2945,7 @@ func testVotersReloadFromDiskPassRecoveryPeriod(t *testing.T, cfg config.Local)
// the voters tracker should contain all the voters for each stateproof round. nothing should be removed
l.WaitForCommit(l.Latest())
- triggerTrackerFlush(t, l, genesisInitState)
+ triggerDeleteVoters(t, l, genesisInitState)
vtSnapshot := l.acctsOnline.voters.votersForRoundCache
beforeRemoveVotersLen := len(vtSnapshot)
@@ -3029,7 +2961,7 @@ func testVotersReloadFromDiskPassRecoveryPeriod(t *testing.T, cfg config.Local)
triggerDeleteVoters(t, l, genesisInitState)
- // round 512 should now be forgotten.
+ // round 256 (240+16) should now be forgotten.
_, found = l.acctsOnline.voters.votersForRoundCache[basics.Round(proto.StateProofInterval-proto.StateProofVotersLookback)]
require.False(t, found)
@@ -3084,37 +3016,6 @@ func TestVotersCallbackPersistsAfterLedgerReload(t *testing.T) {
require.Equal(t, listenerBeforeReload, listenerAfterReload)
}
-type errorCommitListener struct{}
-
-func (l *errorCommitListener) OnPrepareVoterCommit(oldBase basics.Round, newBase basics.Round, _ ledgercore.LedgerForSPBuilder) {
-}
-
-func TestLedgerContinuesOnVotersCallbackFailure(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
- genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
- const inMem = true
- cfg := config.GetDefaultLocal()
- cfg.MaxAcctLookback = 0
- log := logging.TestingLog(t)
- log.SetLevel(logging.Info)
- l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
- require.NoError(t, err)
- defer l.Close()
-
- commitListener := errorCommitListener{}
- l.RegisterVotersCommitListener(&commitListener)
-
- previousCachedDbRound := l.trackers.dbRound
- triggerTrackerFlush(t, l, genesisInitState)
- l.trackers.mu.Lock()
- newDbRound := l.trackers.dbRound
- l.trackers.mu.Unlock()
- require.Equal(t, previousCachedDbRound+1, newDbRound)
-}
-
func TestLedgerSPVerificationTracker(t *testing.T) {
partitiontest.PartitionTest(t)
proto := config.Consensus[protocol.ConsensusCurrentVersion]
@@ -3225,7 +3126,19 @@ func TestLedgerReloadStateProofVerificationTracker(t *testing.T) {
addEmptyValidatedBlock(t, l, genesisInitState.Accounts)
}
- triggerTrackerFlush(t, l, genesisInitState)
+ // trigger trackers flush
+ // first ensure the block is committed into blockdb
+ l.WaitForCommit(l.Latest())
+ // wait for any pending tracker flushes
+ l.trackers.waitAccountsWriting()
+ // force flush as needed
+ if l.LatestTrackerCommitted() < l.Latest()+basics.Round(cfg.MaxAcctLookback) {
+ l.trackers.mu.Lock()
+ l.trackers.lastFlushTime = time.Time{}
+ l.trackers.mu.Unlock()
+ l.notifyCommit(l.Latest())
+ l.trackers.waitAccountsWriting()
+ }
verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound),
numOfStateProofs-1, proto.StateProofInterval, true, trackerDB)
@@ -3356,7 +3269,18 @@ func TestLedgerSPTrackerAfterReplay(t *testing.T) {
// To be deleted, but not yet deleted (waiting for commit)
verifyStateProofVerificationTracking(t, &l.spVerification, firstStateProofRound, 1, proto.StateProofInterval, true, any)
- triggerTrackerFlush(t, l, genesisInitState)
+ // first ensure the block is committed into blockdb
+ l.WaitForCommit(l.Latest())
+ // wait for any pending tracker flushes
+ l.trackers.waitAccountsWriting()
+ // force flush as needed
+ if l.LatestTrackerCommitted() < l.Latest()+basics.Round(cfg.MaxAcctLookback) {
+ l.trackers.mu.Lock()
+ l.trackers.lastFlushTime = time.Time{}
+ l.trackers.mu.Unlock()
+ l.notifyCommit(spblk.BlockHeader.Round)
+ l.trackers.waitAccountsWriting()
+ }
err = l.reloadLedger()
a.NoError(err)
diff --git a/ledger/ledgercore/error.go b/ledger/ledgercore/error.go
index c98312a9a..f5d347d6f 100644
--- a/ledger/ledgercore/error.go
+++ b/ledger/ledgercore/error.go
@@ -28,6 +28,7 @@ import (
var ErrNoSpace = errors.New("block does not have space for transaction")
// TxnNotWellFormedError indicates a transaction was not well-formed when evaluated by the BlockEvaluator
+//
//msgp:ignore TxnNotWellFormedError
type TxnNotWellFormedError string
@@ -106,6 +107,7 @@ func (err ErrNonSequentialBlockEval) Error() string {
}
// TxGroupMalformedErrorReasonCode is a reason code for TxGroupMalformed
+//
//msgp:ignore TxGroupMalformedErrorReasonCode
type TxGroupMalformedErrorReasonCode int
diff --git a/ledger/ledgercore/msgp_gen.go b/ledger/ledgercore/msgp_gen.go
index 709658f43..b0b8bdd28 100644
--- a/ledger/ledgercore/msgp_gen.go
+++ b/ledger/ledgercore/msgp_gen.go
@@ -4,6 +4,10 @@ package ledgercore
import (
"github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/protocol"
)
// The following msgp objects are implemented in this file:
@@ -14,6 +18,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AccountTotalsMaxSize()
//
// AlgoCount
// |-----> (*) MarshalMsg
@@ -22,6 +27,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> AlgoCountMaxSize()
//
// OnlineRoundParamsData
// |-----> (*) MarshalMsg
@@ -30,6 +36,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> OnlineRoundParamsDataMaxSize()
//
// StateProofVerificationContext
// |-----> (*) MarshalMsg
@@ -38,6 +45,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> StateProofVerificationContextMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -665,6 +673,12 @@ func (z *AccountTotals) MsgIsZero() bool {
return (((*z).Online.Money.MsgIsZero()) && ((*z).Online.RewardUnits == 0)) && (((*z).Offline.Money.MsgIsZero()) && ((*z).Offline.RewardUnits == 0)) && (((*z).NotParticipating.Money.MsgIsZero()) && ((*z).NotParticipating.RewardUnits == 0)) && ((*z).RewardsLevel == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func AccountTotalsMaxSize() (s int) {
+ s = 1 + 7 + 1 + 4 + basics.MicroAlgosMaxSize() + 4 + msgp.Uint64Size + 8 + 1 + 4 + basics.MicroAlgosMaxSize() + 4 + msgp.Uint64Size + 8 + 1 + 4 + basics.MicroAlgosMaxSize() + 4 + msgp.Uint64Size + 7 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *AlgoCount) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -794,6 +808,12 @@ func (z *AlgoCount) MsgIsZero() bool {
return ((*z).Money.MsgIsZero()) && ((*z).RewardUnits == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func AlgoCountMaxSize() (s int) {
+ s = 1 + 4 + basics.MicroAlgosMaxSize() + 4 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *OnlineRoundParamsData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -946,6 +966,12 @@ func (z *OnlineRoundParamsData) MsgIsZero() bool {
return ((*z).OnlineSupply == 0) && ((*z).RewardsLevel == 0) && ((*z).CurrentProtocol.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func OnlineRoundParamsDataMaxSize() (s int) {
+ s = 1 + 7 + msgp.Uint64Size + 7 + msgp.Uint64Size + 6 + protocol.ConsensusVersionMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *StateProofVerificationContext) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1120,3 +1146,9 @@ func (z *StateProofVerificationContext) Msgsize() (s int) {
func (z *StateProofVerificationContext) MsgIsZero() bool {
return ((*z).LastAttestedRound.MsgIsZero()) && ((*z).VotersCommitment.MsgIsZero()) && ((*z).OnlineTotalWeight.MsgIsZero()) && ((*z).Version.MsgIsZero())
}
+
+// MaxSize returns a maximum valid message size for this message type
+func StateProofVerificationContextMaxSize() (s int) {
+ s = 1 + 8 + basics.RoundMaxSize() + 3 + crypto.GenericDigestMaxSize() + 3 + basics.MicroAlgosMaxSize() + 2 + protocol.ConsensusVersionMaxSize()
+ return
+}
diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go
index 7e9e25e0d..a730af251 100644
--- a/ledger/ledgercore/statedelta.go
+++ b/ledger/ledgercore/statedelta.go
@@ -22,6 +22,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
+ "golang.org/x/exp/maps"
)
const (
@@ -293,38 +294,24 @@ func (ad *AccountDeltas) Dehydrate() {
if ad.acctsCache == nil {
ad.acctsCache = make(map[basics.Address]int)
}
- for key := range ad.acctsCache {
- delete(ad.acctsCache, key)
- }
+ maps.Clear(ad.acctsCache)
if ad.appResourcesCache == nil {
ad.appResourcesCache = make(map[AccountApp]int)
}
- for key := range ad.appResourcesCache {
- delete(ad.appResourcesCache, key)
- }
+ maps.Clear(ad.appResourcesCache)
if ad.assetResourcesCache == nil {
ad.assetResourcesCache = make(map[AccountAsset]int)
}
- for key := range ad.assetResourcesCache {
- delete(ad.assetResourcesCache, key)
- }
+ maps.Clear(ad.assetResourcesCache)
}
// Reset resets the StateDelta for re-use with sync.Pool
func (sd *StateDelta) Reset() {
sd.Accts.reset()
- for txid := range sd.Txids {
- delete(sd.Txids, txid)
- }
- for txLease := range sd.Txleases {
- delete(sd.Txleases, txLease)
- }
- for creatableIndex := range sd.Creatables {
- delete(sd.Creatables, creatableIndex)
- }
- for key := range sd.KvMods {
- delete(sd.KvMods, key)
- }
+ maps.Clear(sd.Txids)
+ maps.Clear(sd.Txleases)
+ maps.Clear(sd.Creatables)
+ maps.Clear(sd.KvMods)
sd.Totals = AccountTotals{}
// these fields are going to be populated on next use but resetting them anyway for safety.
@@ -342,15 +329,9 @@ func (ad *AccountDeltas) reset() {
ad.AssetResources = ad.AssetResources[:0]
// reset the maps
- for address := range ad.acctsCache {
- delete(ad.acctsCache, address)
- }
- for aApp := range ad.appResourcesCache {
- delete(ad.appResourcesCache, aApp)
- }
- for aAsset := range ad.assetResourcesCache {
- delete(ad.assetResourcesCache, aAsset)
- }
+ maps.Clear(ad.acctsCache)
+ maps.Clear(ad.appResourcesCache)
+ maps.Clear(ad.assetResourcesCache)
}
// notAllocated returns true if any of the fields allocated by MakeAccountDeltas is nil
@@ -581,11 +562,7 @@ func (sd *StateDelta) OptimizeAllocatedMemory(maxBalLookback uint64) {
// realloc if original allocation capacity greater than length of data, and space difference is significant
if 2*sd.initialHint > len(sd.Accts.acctsCache) &&
uint64(2*sd.initialHint-len(sd.Accts.acctsCache))*accountMapCacheEntrySize*maxBalLookback > stateDeltaTargetOptimizationThreshold {
- acctsCache := make(map[basics.Address]int, len(sd.Accts.acctsCache))
- for k, v := range sd.Accts.acctsCache {
- acctsCache[k] = v
- }
- sd.Accts.acctsCache = acctsCache
+ sd.Accts.acctsCache = maps.Clone(sd.Accts.acctsCache)
}
}
diff --git a/ledger/ledgercore/totals.go b/ledger/ledgercore/totals.go
index 4e75ddbf4..904cac911 100644
--- a/ledger/ledgercore/totals.go
+++ b/ledger/ledgercore/totals.go
@@ -112,7 +112,7 @@ func (at *AccountTotals) All() basics.MicroAlgos {
return res
}
-// Participating returns the sum of algos held under ``participating''
+// Participating returns the sum of algos held under “participating”
// account status values (Online and Offline). It excludes MicroAlgos held
// by NotParticipating accounts.
func (at *AccountTotals) Participating() basics.MicroAlgos {
@@ -123,7 +123,7 @@ func (at *AccountTotals) Participating() basics.MicroAlgos {
return res
}
-// RewardUnits returns the sum of reward units held under ``participating''
+// RewardUnits returns the sum of reward units held under “participating”
// account status values (Online and Offline). It excludes units held
// by NotParticipating accounts.
func (at *AccountTotals) RewardUnits() uint64 {
diff --git a/ledger/ledgercore/votersForRound.go b/ledger/ledgercore/votersForRound.go
index 94901bf20..4cce400bc 100644
--- a/ledger/ledgercore/votersForRound.go
+++ b/ledger/ledgercore/votersForRound.go
@@ -170,7 +170,7 @@ func (tr *VotersForRound) BroadcastError(err error) {
tr.mu.Unlock()
}
-//Wait waits for the tree to get constructed.
+// Wait waits for the tree to get constructed.
func (tr *VotersForRound) Wait() error {
tr.mu.Lock()
defer tr.mu.Unlock()
diff --git a/ledger/metrics.go b/ledger/metrics.go
index 49375ed83..661cb0c2d 100644
--- a/ledger/metrics.go
+++ b/ledger/metrics.go
@@ -30,12 +30,14 @@ type metricsTracker struct {
ledgerTransactionsTotal *metrics.Counter
ledgerRewardClaimsTotal *metrics.Counter
ledgerRound *metrics.Gauge
+ ledgerDBRound *metrics.Gauge
}
func (mt *metricsTracker) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
mt.ledgerTransactionsTotal = metrics.MakeCounter(metrics.LedgerTransactionsTotal)
mt.ledgerRewardClaimsTotal = metrics.MakeCounter(metrics.LedgerRewardClaimsTotal)
mt.ledgerRound = metrics.MakeGauge(metrics.LedgerRound)
+ mt.ledgerDBRound = metrics.MakeGauge(metrics.LedgerDBRound)
return nil
}
@@ -52,6 +54,10 @@ func (mt *metricsTracker) close() {
mt.ledgerRound.Deregister(nil)
mt.ledgerRound = nil
}
+ if mt.ledgerDBRound != nil {
+ mt.ledgerDBRound.Deregister(nil)
+ mt.ledgerDBRound = nil
+ }
}
func (mt *metricsTracker) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
@@ -75,6 +81,7 @@ func (mt *metricsTracker) commitRound(context.Context, trackerdb.TransactionScop
}
func (mt *metricsTracker) postCommit(ctx context.Context, dcc *deferredCommitContext) {
+ mt.ledgerDBRound.Set(uint64(dcc.newBase()))
}
func (mt *metricsTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go
index 4164a526d..7148edfa6 100644
--- a/ledger/msgp_gen.go
+++ b/ledger/msgp_gen.go
@@ -5,6 +5,8 @@ package ledger
import (
"github.com/algorand/msgp/msgp"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger/encoded"
"github.com/algorand/go-algorand/ledger/ledgercore"
)
@@ -17,6 +19,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> CatchpointCatchupStateMaxSize()
//
// CatchpointFileHeader
// |-----> (*) MarshalMsg
@@ -25,6 +28,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> CatchpointFileHeaderMaxSize()
//
// catchpointFileBalancesChunkV5
// |-----> (*) MarshalMsg
@@ -33,6 +37,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> CatchpointFileBalancesChunkV5MaxSize()
//
// catchpointFileChunkV6
// |-----> (*) MarshalMsg
@@ -41,6 +46,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> CatchpointFileChunkV6MaxSize()
//
// catchpointStateProofVerificationContext
// |-----> (*) MarshalMsg
@@ -49,6 +55,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> CatchpointStateProofVerificationContextMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -97,6 +104,12 @@ func (z CatchpointCatchupState) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func CatchpointCatchupStateMaxSize() (s int) {
+ s = msgp.Int32Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *CatchpointFileHeader) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -387,6 +400,14 @@ func (z *CatchpointFileHeader) MsgIsZero() bool {
return ((*z).Version == 0) && ((*z).BalancesRound.MsgIsZero()) && ((*z).BlocksRound.MsgIsZero()) && ((*z).Totals.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalChunks == 0) && ((*z).TotalKVs == 0) && ((*z).Catchpoint == "") && ((*z).BlockHeaderDigest.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func CatchpointFileHeaderMaxSize() (s int) {
+ s = 1 + 8 + msgp.Uint64Size + 14 + basics.RoundMaxSize() + 12 + basics.RoundMaxSize() + 14 + ledgercore.AccountTotalsMaxSize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 9 + msgp.Uint64Size + 11
+ panic("Unable to determine max size: String type z.Catchpoint is unbounded")
+ s += 18 + crypto.DigestMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *catchpointFileBalancesChunkV5) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -545,6 +566,14 @@ func (z *catchpointFileBalancesChunkV5) MsgIsZero() bool {
return (len((*z).Balances) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func CatchpointFileBalancesChunkV5MaxSize() (s int) {
+ s = 1 + 3
+ // Calculating size of slice: z.Balances
+ s += msgp.ArrayHeaderSize + ((BalancesPerCatchpointFileChunk) * (encoded.BalanceRecordV5MaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *catchpointFileChunkV6) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -779,6 +808,17 @@ func (z *catchpointFileChunkV6) MsgIsZero() bool {
return (len((*z).Balances) == 0) && (len((*z).KVs) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func CatchpointFileChunkV6MaxSize() (s int) {
+ s = 1 + 3
+ // Calculating size of slice: z.Balances
+ s += msgp.ArrayHeaderSize + ((BalancesPerCatchpointFileChunk) * (encoded.BalanceRecordV6MaxSize()))
+ s += 3
+ // Calculating size of slice: z.KVs
+ s += msgp.ArrayHeaderSize + ((BalancesPerCatchpointFileChunk) * (encoded.KVRecordV6MaxSize()))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *catchpointStateProofVerificationContext) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -936,3 +976,11 @@ func (z *catchpointStateProofVerificationContext) Msgsize() (s int) {
func (z *catchpointStateProofVerificationContext) MsgIsZero() bool {
return (len((*z).Data) == 0)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func CatchpointStateProofVerificationContextMaxSize() (s int) {
+ s = 1 + 4
+ // Calculating size of slice: z.Data
+ s += msgp.ArrayHeaderSize + ((SPContextPerCatchpointFile) * (ledgercore.StateProofVerificationContextMaxSize()))
+ return
+}
diff --git a/ledger/onlineaccountscache_test.go b/ledger/onlineaccountscache_test.go
index 45dde5278..0d4de85fc 100644
--- a/ledger/onlineaccountscache_test.go
+++ b/ledger/onlineaccountscache_test.go
@@ -216,7 +216,7 @@ func benchmarkOnlineAccountsCacheRead(b *testing.B, historyLength int) {
// preparation stage above non-negligible.
minN := 100
if b.N < minN {
- b.N = minN
+ b.N = minN //nolint:staticcheck // intentionally setting b.N
}
var r cachedOnlineAccount
diff --git a/ledger/simulation/simulation_eval_test.go b/ledger/simulation/simulation_eval_test.go
index ee5c800f3..63d46bf53 100644
--- a/ledger/simulation/simulation_eval_test.go
+++ b/ledger/simulation/simulation_eval_test.go
@@ -20,6 +20,7 @@ import (
"encoding/binary"
"encoding/hex"
"fmt"
+ "math"
"strings"
"testing"
@@ -29,12 +30,12 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/logic/mocktracer"
"github.com/algorand/go-algorand/data/txntest"
-
"github.com/algorand/go-algorand/ledger/simulation"
simulationtesting "github.com/algorand/go-algorand/ledger/simulation/testing"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -47,6 +48,7 @@ func uint64ToBytes(num uint64) []byte {
type simulationTestCase struct {
input simulation.Request
+ developerAPI bool
expected simulation.Result
expectedError string
}
@@ -122,11 +124,10 @@ func simulationTest(t *testing.T, f func(env simulationtesting.Environment) simu
t.Helper()
env := simulationtesting.PrepareSimulatorTest(t)
defer env.Close()
- s := simulation.MakeSimulator(env.Ledger)
testcase := f(env)
- actual, err := s.Simulate(testcase.input)
+ actual, err := simulation.MakeSimulator(env.Ledger, testcase.developerAPI).Simulate(testcase.input)
require.NoError(t, err)
validateSimulationResult(t, actual)
@@ -267,6 +268,60 @@ func TestPayTxn(t *testing.T) {
})
}
+func TestIllFormedStackRequest(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ env := simulationtesting.PrepareSimulatorTest(t)
+ defer env.Close()
+
+ sender := env.Accounts[0]
+ futureAppID := basics.AppIndex(1001)
+
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: `#pragma version 6
+txn ApplicationID
+bz create
+byte "app call"
+log
+b end
+create:
+byte "app creation"
+log
+end:
+int 1`,
+ ClearStateProgram: `#pragma version 6
+int 0`,
+ })
+ callTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ })
+
+ txntest.Group(&createTxn, &callTxn)
+
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
+ signedCallTxn := callTxn.Txn().Sign(sender.Sk)
+
+ simRequest := simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedCreateTxn, signedCallTxn},
+ },
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: false,
+ Stack: true,
+ },
+ }
+
+ _, err := simulation.MakeSimulator(env.Ledger, true).Simulate(simRequest)
+ require.ErrorAs(t, err, &simulation.InvalidRequestError{})
+ require.ErrorContains(t, err, "basic trace must be enabled when enabling stack tracing")
+}
+
func TestWrongAuthorizerTxn(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -369,7 +424,7 @@ func TestStateProofTxn(t *testing.T) {
env := simulationtesting.PrepareSimulatorTest(t)
defer env.Close()
- s := simulation.MakeSimulator(env.Ledger)
+ s := simulation.MakeSimulator(env.Ledger, false)
txgroup := []transactions.SignedTxn{
env.TxnInfo.NewTxn(txntest.Txn{
@@ -388,7 +443,7 @@ func TestSimpleGroupTxn(t *testing.T) {
env := simulationtesting.PrepareSimulatorTest(t)
defer env.Close()
- s := simulation.MakeSimulator(env.Ledger)
+ s := simulation.MakeSimulator(env.Ledger, false)
sender1 := env.Accounts[0]
sender1Balance := env.Accounts[0].AcctData.MicroAlgos
sender2 := env.Accounts[1]
@@ -917,6 +972,106 @@ func TestAppCallWithExtraBudget(t *testing.T) {
})
}
+func TestAppCallWithExtraBudgetReturningPC(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Transaction group has a cost of 4 + 1404
+ expensiveAppSource := `#pragma version 6
+ txn ApplicationID // [appId]
+ bz end // []
+` + strings.Repeat(`int 1; pop;`, 700) + `end:
+ int 1`
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ futureAppID := basics.AppIndex(1001)
+ // App create with cost 4
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: expensiveAppSource,
+ ClearStateProgram: "#pragma version 6\nint 1",
+ })
+ // Expensive 700 repetition of int 1 and pop total cost 1404
+ expensiveTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ })
+
+ txntest.Group(&createTxn, &expensiveTxn)
+
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
+ signedExpensiveTxn := expensiveTxn.Txn().Sign(sender.Sk)
+ extraOpcodeBudget := uint64(100)
+
+ commonLeadingSteps := []simulation.OpcodeTraceUnit{
+ {PC: 1}, {PC: 4}, {PC: 6},
+ }
+
+ // Get the first trace
+ firstTrace := make([]simulation.OpcodeTraceUnit, len(commonLeadingSteps))
+ copy(firstTrace, commonLeadingSteps[:])
+ firstTrace = append(firstTrace, simulation.OpcodeTraceUnit{PC: 1409})
+
+ // Get the second trace
+ secondTrace := make([]simulation.OpcodeTraceUnit, len(commonLeadingSteps))
+ copy(secondTrace, commonLeadingSteps[:])
+ for i := 9; i <= 1409; i++ {
+ secondTrace = append(secondTrace, simulation.OpcodeTraceUnit{PC: uint64(i)})
+ }
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedCreateTxn, signedExpensiveTxn},
+ },
+ ExtraOpcodeBudget: extraOpcodeBudget,
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ },
+ },
+ developerAPI: true,
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ },
+ },
+ AppBudgetConsumed: 4,
+ Trace: &simulation.TransactionTrace{
+ ApprovalProgramTrace: firstTrace,
+ },
+ },
+ {
+ AppBudgetConsumed: 1404,
+ Trace: &simulation.TransactionTrace{
+ ApprovalProgramTrace: secondTrace,
+ },
+ },
+ },
+ AppBudgetAdded: 1500,
+ AppBudgetConsumed: 1408,
+ },
+ },
+ EvalOverrides: simulation.ResultEvalOverrides{ExtraOpcodeBudget: extraOpcodeBudget},
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ },
+ },
+ }
+ })
+}
+
func TestAppCallWithExtraBudgetOverBudget(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -1004,7 +1159,7 @@ func TestAppCallWithExtraBudgetExceedsInternalLimit(t *testing.T) {
env := simulationtesting.PrepareSimulatorTest(t)
defer env.Close()
- s := simulation.MakeSimulator(env.Ledger)
+ s := simulation.MakeSimulator(env.Ledger, false)
sender := env.Accounts[0]
@@ -1212,7 +1367,7 @@ func TestDefaultSignatureCheck(t *testing.T) {
env := simulationtesting.PrepareSimulatorTest(t)
defer env.Close()
- s := simulation.MakeSimulator(env.Ledger)
+ s := simulation.MakeSimulator(env.Ledger, false)
sender := env.Accounts[0]
stxn := env.TxnInfo.NewTxn(txntest.Txn{
@@ -1397,7 +1552,6 @@ int 1`
simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
sender := env.Accounts[0]
- receiver := env.Accounts[1]
futureAppID := basics.AppIndex(1001)
@@ -1413,7 +1567,6 @@ int 1`
Type: protocol.ApplicationCallTx,
Sender: sender.Addr,
ApplicationID: futureAppID,
- Accounts: []basics.Address{receiver.Addr},
ApplicationArgs: [][]byte{[]byte("first-arg")},
})
@@ -1475,6 +1628,1242 @@ int 1`
})
}
+// The program is originated from pyteal source for c2c test over betanet:
+// https://github.com/ahangsu/c2c-testscript/blob/master/c2c_test/max_depth/app.py
+//
+// To fully test the PC exposure, we added opt-in and clear-state calls,
+// between funding and calling with on-complete deletion.
+// The modified version here: https://gist.github.com/ahangsu/7839f558dd36ad7117c0a12fb1dcc63a
+const maxDepthTealApproval = `#pragma version 8
+txn ApplicationID
+int 0
+==
+bnz main_l6
+txn OnCompletion
+int OptIn
+==
+bnz main_l6
+txn NumAppArgs
+int 1
+==
+bnz main_l3
+err
+main_l3:
+global CurrentApplicationID
+app_params_get AppApprovalProgram
+store 1
+store 0
+global CurrentApplicationID
+app_params_get AppClearStateProgram
+store 3
+store 2
+global CurrentApplicationAddress
+acct_params_get AcctBalance
+store 5
+store 4
+load 1
+assert
+load 3
+assert
+load 5
+assert
+int 2
+txna ApplicationArgs 0
+btoi
+exp
+itob
+log
+txna ApplicationArgs 0
+btoi
+int 0
+>
+bnz main_l5
+main_l4:
+int 1
+return
+main_l5:
+itxn_begin
+ int appl
+ itxn_field TypeEnum
+ int 0
+ itxn_field Fee
+ load 0
+ itxn_field ApprovalProgram
+ load 2
+ itxn_field ClearStateProgram
+itxn_submit
+itxn_begin
+ int pay
+ itxn_field TypeEnum
+ int 0
+ itxn_field Fee
+ load 4
+ int 100000
+ -
+ itxn_field Amount
+ byte "appID"
+ gitxn 0 CreatedApplicationID
+ itob
+ concat
+ sha512_256
+ itxn_field Receiver
+itxn_next
+ int appl
+ itxn_field TypeEnum
+ itxn CreatedApplicationID
+ itxn_field ApplicationID
+ int 0
+ itxn_field Fee
+ int OptIn
+ itxn_field OnCompletion
+itxn_next
+ int appl
+ itxn_field TypeEnum
+ itxn CreatedApplicationID
+ itxn_field ApplicationID
+ int 0
+ itxn_field Fee
+ int ClearState
+ itxn_field OnCompletion
+itxn_next
+ int appl
+ itxn_field TypeEnum
+ txna ApplicationArgs 0
+ btoi
+ int 1
+ -
+ itob
+ itxn_field ApplicationArgs
+ itxn CreatedApplicationID
+ itxn_field ApplicationID
+ int 0
+ itxn_field Fee
+ int DeleteApplication
+ itxn_field OnCompletion
+itxn_submit
+b main_l4
+main_l6:
+int 1
+return`
+
+func TestMaxDepthAppWithPCTrace(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+ futureAppID := basics.AppIndex(1001)
+
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: maxDepthTealApproval,
+ ClearStateProgram: "#pragma version 8\nint 1",
+ })
+
+ MaxDepth := 2
+ MinBalance := env.TxnInfo.CurrentProtocolParams().MinBalance
+ MinFee := env.TxnInfo.CurrentProtocolParams().MinTxnFee
+
+ paymentTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: futureAppID.Address(),
+ Amount: MinBalance * uint64(MaxDepth+1),
+ })
+
+ callsMaxDepth := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ ApplicationArgs: [][]byte{{byte(MaxDepth)}},
+ Fee: MinFee * uint64(MaxDepth*5+2),
+ })
+
+ txntest.Group(&createTxn, &paymentTxn, &callsMaxDepth)
+
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
+ signedPaymentTxn := paymentTxn.Txn().Sign(sender.Sk)
+ signedCallsMaxDepth := callsMaxDepth.Txn().Sign(sender.Sk)
+
+ creationOpcodeTrace := []simulation.OpcodeTraceUnit{
+ {PC: 1},
+ {PC: 6},
+ {PC: 8},
+ {PC: 9},
+ {PC: 10},
+ {PC: 185},
+ {PC: 186},
+ }
+
+ clearStateOpcodeTrace := []simulation.OpcodeTraceUnit{{PC: 1}}
+
+ recursiveLongOpcodeTrace := []simulation.OpcodeTraceUnit{
+ {PC: 1},
+ {PC: 6},
+ {PC: 8},
+ {PC: 9},
+ {PC: 10},
+ {PC: 13},
+ {PC: 15},
+ {PC: 16},
+ {PC: 17},
+ {PC: 20},
+ {PC: 22},
+ {PC: 23},
+ {PC: 24},
+ {PC: 28},
+ {PC: 30},
+ {PC: 32},
+ {PC: 34},
+ {PC: 36},
+ {PC: 38},
+ {PC: 40},
+ {PC: 42},
+ {PC: 44},
+ {PC: 46},
+ {PC: 48},
+ {PC: 50},
+ {PC: 52},
+ {PC: 54},
+ {PC: 55},
+ {PC: 57},
+ {PC: 58},
+ {PC: 60},
+ {PC: 61},
+ {PC: 63},
+ {PC: 66},
+ {PC: 67},
+ {PC: 68},
+ {PC: 69},
+ {PC: 70},
+ {PC: 73},
+ {PC: 74},
+ {PC: 75},
+ {PC: 76},
+ {PC: 81},
+ {PC: 82},
+ {PC: 83},
+ {PC: 85},
+ {PC: 86},
+ {PC: 88},
+ {PC: 90},
+ {PC: 92},
+ {PC: 94},
+ {PC: 96, SpawnedInners: []int{0}},
+ {PC: 97},
+ {PC: 98},
+ {PC: 99},
+ {PC: 101},
+ {PC: 102},
+ {PC: 104},
+ {PC: 106},
+ {PC: 110},
+ {PC: 111},
+ {PC: 113},
+ {PC: 120},
+ {PC: 123},
+ {PC: 124},
+ {PC: 125},
+ {PC: 126},
+ {PC: 128},
+ {PC: 129},
+ {PC: 130},
+ {PC: 132},
+ {PC: 134},
+ {PC: 136},
+ {PC: 137},
+ {PC: 139},
+ {PC: 140},
+ {PC: 142},
+ {PC: 143},
+ {PC: 144},
+ {PC: 146},
+ {PC: 148},
+ {PC: 150},
+ {PC: 151},
+ {PC: 153},
+ {PC: 155},
+ {PC: 157},
+ {PC: 158},
+ {PC: 159},
+ {PC: 161},
+ {PC: 164},
+ {PC: 165},
+ {PC: 166},
+ {PC: 167},
+ {PC: 168},
+ {PC: 170},
+ {PC: 172},
+ {PC: 174},
+ {PC: 175},
+ {PC: 177},
+ {PC: 179},
+ {PC: 181, SpawnedInners: []int{1, 2, 3, 4}},
+ {PC: 182},
+ {PC: 79},
+ {PC: 80},
+ }
+
+ optInTrace := []simulation.OpcodeTraceUnit{
+ {PC: 1},
+ {PC: 6},
+ {PC: 8},
+ {PC: 9},
+ {PC: 10},
+ {PC: 13},
+ {PC: 15},
+ {PC: 16},
+ {PC: 17},
+ {PC: 185},
+ {PC: 186},
+ }
+
+ finalDepthTrace := []simulation.OpcodeTraceUnit{
+ {PC: 1},
+ {PC: 6},
+ {PC: 8},
+ {PC: 9},
+ {PC: 10},
+ {PC: 13},
+ {PC: 15},
+ {PC: 16},
+ {PC: 17},
+ {PC: 20},
+ {PC: 22},
+ {PC: 23},
+ {PC: 24},
+ {PC: 28},
+ {PC: 30},
+ {PC: 32},
+ {PC: 34},
+ {PC: 36},
+ {PC: 38},
+ {PC: 40},
+ {PC: 42},
+ {PC: 44},
+ {PC: 46},
+ {PC: 48},
+ {PC: 50},
+ {PC: 52},
+ {PC: 54},
+ {PC: 55},
+ {PC: 57},
+ {PC: 58},
+ {PC: 60},
+ {PC: 61},
+ {PC: 63},
+ {PC: 66},
+ {PC: 67},
+ {PC: 68},
+ {PC: 69},
+ {PC: 70},
+ {PC: 73},
+ {PC: 74},
+ {PC: 75},
+ {PC: 76},
+ {PC: 79},
+ {PC: 80},
+ }
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedCreateTxn, signedPaymentTxn, signedCallsMaxDepth},
+ },
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ },
+ },
+ developerAPI: true,
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{ApplicationID: futureAppID},
+ },
+ AppBudgetConsumed: 7,
+ Trace: &simulation.TransactionTrace{
+ ApprovalProgramTrace: creationOpcodeTrace,
+ },
+ },
+ {
+ Trace: &simulation.TransactionTrace{},
+ },
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: 0,
+ EvalDelta: transactions.EvalDelta{
+ Logs: []string{string(uint64ToBytes(1 << MaxDepth))},
+ InnerTxns: []transactions.SignedTxnWithAD{
+ {
+ ApplyData: transactions.ApplyData{ApplicationID: futureAppID + 3},
+ },
+ {},
+ {},
+ {},
+ {
+ ApplyData: transactions.ApplyData{
+ EvalDelta: transactions.EvalDelta{
+ Logs: []string{string(uint64ToBytes(1 << (MaxDepth - 1)))},
+ InnerTxns: []transactions.SignedTxnWithAD{
+ {
+ ApplyData: transactions.ApplyData{ApplicationID: futureAppID + 8},
+ },
+ {},
+ {},
+ {},
+ {
+ ApplyData: transactions.ApplyData{
+ EvalDelta: transactions.EvalDelta{
+ Logs: []string{string(uint64ToBytes(1 << (MaxDepth - 2)))},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ AppBudgetConsumed: 378,
+ Trace: &simulation.TransactionTrace{
+ ApprovalProgramTrace: recursiveLongOpcodeTrace,
+ InnerTraces: []simulation.TransactionTrace{
+ {
+ ApprovalProgramTrace: creationOpcodeTrace,
+ },
+ {},
+ {
+ ApprovalProgramTrace: optInTrace,
+ },
+ {
+ ClearStateProgramTrace: clearStateOpcodeTrace,
+ },
+ {
+ ApprovalProgramTrace: recursiveLongOpcodeTrace,
+ InnerTraces: []simulation.TransactionTrace{
+ {
+ ApprovalProgramTrace: creationOpcodeTrace,
+ },
+ {},
+ {
+ ApprovalProgramTrace: optInTrace,
+ },
+ {
+ ClearStateProgramTrace: clearStateOpcodeTrace,
+ },
+ {
+ ApprovalProgramTrace: finalDepthTrace,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ AppBudgetAdded: 4200,
+ AppBudgetConsumed: 385,
+ },
+ },
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ },
+ },
+ }
+ })
+}
+
+func goValuesToTealValues(goValues ...interface{}) []basics.TealValue {
+ if len(goValues) == 0 {
+ return nil
+ }
+
+ boolToUint64 := func(b bool) uint64 {
+ if b {
+ return 1
+ }
+ return 0
+ }
+
+ modelValues := make([]basics.TealValue, len(goValues))
+ for i, goValue := range goValues {
+ switch convertedValue := goValue.(type) {
+ case []byte:
+ modelValues[i] = basics.TealValue{
+ Type: basics.TealBytesType,
+ Bytes: string(convertedValue),
+ }
+ case string:
+ modelValues[i] = basics.TealValue{
+ Type: basics.TealBytesType,
+ Bytes: string(convertedValue),
+ }
+ case bool:
+ modelValues[i] = basics.TealValue{
+ Type: basics.TealUintType,
+ Uint: boolToUint64(convertedValue),
+ }
+ case int:
+ modelValues[i] = basics.TealValue{
+ Type: basics.TealUintType,
+ Uint: uint64(convertedValue),
+ }
+ case basics.AppIndex:
+ modelValues[i] = basics.TealValue{
+ Type: basics.TealUintType,
+ Uint: uint64(convertedValue),
+ }
+ case uint64:
+ modelValues[i] = basics.TealValue{
+ Type: basics.TealUintType,
+ Uint: convertedValue,
+ }
+ default:
+ panic("unexpected type inferred from interface{}")
+ }
+ }
+ return modelValues
+}
+
+func TestLogicSigPCandStackExposure(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ op, err := logic.AssembleString(`#pragma version 8
+` + strings.Repeat(`byte "a"; keccak256; pop
+`, 2) + `int 1`)
+ require.NoError(t, err)
+ program := logic.Program(op.Program)
+ lsigAddr := basics.Address(crypto.HashObj(&program))
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ payTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: lsigAddr,
+ Amount: 1_000_000,
+ })
+ appCallTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: lsigAddr,
+ ApprovalProgram: `#pragma version 8
+byte "hello"; log; int 1`,
+ ClearStateProgram: "#pragma version 8\n int 1",
+ })
+
+ txntest.Group(&payTxn, &appCallTxn)
+
+ signedPayTxn := payTxn.Txn().Sign(sender.Sk)
+ signedAppCallTxn := appCallTxn.SignedTxn()
+ signedAppCallTxn.Lsig = transactions.LogicSig{Logic: program}
+
+ keccakBytes := ":\xc2%\x16\x8d\xf5B\x12\xa2\\\x1c\x01\xfd5\xbe\xbf\xea@\x8f\xda\xc2\xe3\x1d\xddo\x80\xa4\xbb\xf9\xa5\xf1\xcb"
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedPayTxn, signedAppCallTxn},
+ },
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ Stack: true,
+ },
+ },
+ developerAPI: true,
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ Stack: true,
+ },
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {
+ Trace: &simulation.TransactionTrace{},
+ },
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: 1002,
+ EvalDelta: transactions.EvalDelta{Logs: []string{"hello"}},
+ },
+ },
+ AppBudgetConsumed: 3,
+ LogicSigBudgetConsumed: 266,
+ Trace: &simulation.TransactionTrace{
+ ApprovalProgramTrace: []simulation.OpcodeTraceUnit{
+ {
+ PC: 1,
+ StackAdded: goValuesToTealValues("hello"),
+ },
+ {
+ PC: 8,
+ StackPopCount: 1,
+ },
+ {
+ PC: 9,
+ StackAdded: goValuesToTealValues(1),
+ },
+ },
+ LogicSigTrace: []simulation.OpcodeTraceUnit{
+ {
+ PC: 1,
+ },
+ {
+ PC: 5,
+ StackAdded: goValuesToTealValues("a"),
+ },
+ {
+ PC: 6,
+ StackAdded: goValuesToTealValues(keccakBytes),
+ StackPopCount: 1,
+ },
+ {
+ PC: 7,
+ StackPopCount: 1,
+ },
+ {
+ PC: 8,
+ StackAdded: goValuesToTealValues("a"),
+ },
+ {
+ PC: 9,
+ StackAdded: goValuesToTealValues(keccakBytes),
+ StackPopCount: 1,
+ },
+ {
+ PC: 10,
+ StackPopCount: 1,
+ },
+ {
+ PC: 11,
+ StackAdded: goValuesToTealValues(1),
+ },
+ },
+ },
+ },
+ },
+ AppBudgetAdded: 700,
+ AppBudgetConsumed: 3,
+ },
+ },
+ },
+ }
+ })
+}
+
+func TestFailingLogicSigPCandStack(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ op, err := logic.AssembleString(`#pragma version 8
+` + strings.Repeat(`byte "a"; keccak256; pop
+`, 2) + `int 0; int 1; -`)
+ require.NoError(t, err)
+ program := logic.Program(op.Program)
+ lsigAddr := basics.Address(crypto.HashObj(&program))
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ payTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: lsigAddr,
+ Amount: 1_000_000,
+ })
+ appCallTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: lsigAddr,
+ ApprovalProgram: `#pragma version 8
+byte "hello"; log; int 1`,
+ ClearStateProgram: "#pragma version 8\n int 1",
+ })
+
+ txntest.Group(&payTxn, &appCallTxn)
+
+ signedPayTxn := payTxn.Txn().Sign(sender.Sk)
+ signedAppCallTxn := appCallTxn.SignedTxn()
+ signedAppCallTxn.Lsig = transactions.LogicSig{Logic: program}
+
+ keccakBytes := ":\xc2%\x16\x8d\xf5B\x12\xa2\\\x1c\x01\xfd5\xbe\xbf\xea@\x8f\xda\xc2\xe3\x1d\xddo\x80\xa4\xbb\xf9\xa5\xf1\xcb"
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedPayTxn, signedAppCallTxn},
+ },
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ Stack: true,
+ },
+ },
+ developerAPI: true,
+ expectedError: "rejected by logic",
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ Stack: true,
+ },
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ FailedAt: simulation.TxnPath{1},
+ Txns: []simulation.TxnResult{
+ {},
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{},
+ },
+ LogicSigBudgetConsumed: 268,
+ Trace: &simulation.TransactionTrace{
+ LogicSigTrace: []simulation.OpcodeTraceUnit{
+ {
+ PC: 1,
+ },
+ {
+ PC: 5,
+ StackAdded: goValuesToTealValues("a"),
+ },
+ {
+ PC: 6,
+ StackAdded: goValuesToTealValues(keccakBytes),
+ StackPopCount: 1,
+ },
+ {
+ PC: 7,
+ StackPopCount: 1,
+ },
+ {
+ PC: 8,
+ StackAdded: goValuesToTealValues("a"),
+ },
+ {
+ PC: 9,
+ StackAdded: goValuesToTealValues(keccakBytes),
+ StackPopCount: 1,
+ },
+ {
+ PC: 10,
+ StackPopCount: 1,
+ },
+ {
+ PC: 11,
+ StackAdded: goValuesToTealValues(0),
+ },
+ {
+ PC: 13,
+ StackAdded: goValuesToTealValues(1),
+ },
+ {
+ PC: 15,
+ StackPopCount: 2,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ })
+}
+
+func TestFailingApp(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ op, err := logic.AssembleString(`#pragma version 8
+` + strings.Repeat(`byte "a"; keccak256; pop
+`, 2) + `int 1`)
+ require.NoError(t, err)
+ program := logic.Program(op.Program)
+ lsigAddr := basics.Address(crypto.HashObj(&program))
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ payTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: lsigAddr,
+ Amount: 1_000_000,
+ })
+ appCallTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: lsigAddr,
+ ApprovalProgram: `#pragma version 8
+byte "hello"; log; int 0`,
+ ClearStateProgram: "#pragma version 8\n int 1",
+ })
+
+ txntest.Group(&payTxn, &appCallTxn)
+
+ signedPayTxn := payTxn.Txn().Sign(sender.Sk)
+ signedAppCallTxn := appCallTxn.SignedTxn()
+ signedAppCallTxn.Lsig = transactions.LogicSig{Logic: program}
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedPayTxn, signedAppCallTxn},
+ },
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ },
+ },
+ developerAPI: true,
+ expectedError: "rejected by ApprovalProgram",
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ },
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ FailedAt: simulation.TxnPath{1},
+ Txns: []simulation.TxnResult{
+ {
+ Trace: &simulation.TransactionTrace{},
+ },
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: 1002,
+ EvalDelta: transactions.EvalDelta{Logs: []string{"hello"}},
+ },
+ },
+ AppBudgetConsumed: 3,
+ LogicSigBudgetConsumed: 266,
+ Trace: &simulation.TransactionTrace{
+ ApprovalProgramTrace: []simulation.OpcodeTraceUnit{
+ {PC: 1},
+ {PC: 8},
+ {PC: 9},
+ },
+ LogicSigTrace: []simulation.OpcodeTraceUnit{
+ {PC: 1},
+ {PC: 5},
+ {PC: 6},
+ {PC: 7},
+ {PC: 8},
+ {PC: 9},
+ {PC: 10},
+ {PC: 11},
+ },
+ },
+ },
+ },
+ AppBudgetAdded: 700,
+ AppBudgetConsumed: 3,
+ },
+ },
+ },
+ }
+ })
+}
+
+const FrameBuryDigProgram = `#pragma version 8
+txn ApplicationID // on creation, always approve
+bz end
+
+txn NumAppArgs
+int 1
+==
+assert
+
+txn ApplicationArgs 0
+btoi
+callsub subroutine_manipulating_stack
+itob
+log
+b end
+
+subroutine_manipulating_stack:
+ proto 1 1
+ int 0 // [0]
+ dup // [0, 0]
+ dupn 4 // [0, 0, 0, 0, 0, 0]
+ frame_dig -1 // [0, 0, 0, 0, 0, 0, arg_0]
+ frame_bury 0 // [arg_0, 0, 0, 0, 0, 0]
+ dig 5 // [arg_0, 0, 0, 0, 0, 0, arg_0]
+ cover 5 // [arg_0, arg_0, 0, 0, 0, 0, 0]
+ frame_dig 0 // [arg_0, arg_0, 0, 0, 0, 0, 0, arg_0]
+ frame_dig 1 // [arg_0, arg_0, 0, 0, 0, 0, 0, arg_0, arg_0]
+ + // [arg_0, arg_0, 0, 0, 0, 0, 0, arg_0 * 2]
+ bury 7 // [arg_0 * 2, arg_0, 0, 0, 0, 0, 0]
+ popn 5 // [arg_0 * 2, arg_0]
+ uncover 1 // [arg_0, arg_0 * 2]
+ swap // [arg_0 * 2, arg_0]
+ + // [arg_0 * 3]
+ pushbytess "1!" "5!" // [arg_0 * 3, "1!", "5!"]
+ pushints 0 2 1 1 5 18446744073709551615 // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1, 5, 18446744073709551615]
+ store 1 // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1, 5]
+ load 1 // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1, 5, 18446744073709551615]
+ stores // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1]
+ load 1 // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1, 18446744073709551615]
+ store 1 // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1]
+ retsub
+
+end:
+ int 1
+ return
+`
+
+func TestFrameBuryDigStackTrace(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ futureAppID := basics.AppIndex(1001)
+
+ applicationArg := 10
+
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: FrameBuryDigProgram,
+ ClearStateProgram: `#pragma version 8
+int 1`,
+ })
+ payment := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: futureAppID.Address(),
+ Amount: env.TxnInfo.CurrentProtocolParams().MinBalance,
+ })
+ callTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ ApplicationArgs: [][]byte{{byte(applicationArg)}},
+ })
+ txntest.Group(&createTxn, &payment, &callTxn)
+
+ signedCreate := createTxn.Txn().Sign(sender.Sk)
+ signedPay := payment.Txn().Sign(sender.Sk)
+ signedAppCall := callTxn.Txn().Sign(sender.Sk)
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedCreate, signedPay, signedAppCall},
+ },
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ Stack: true,
+ Scratch: true,
+ },
+ },
+ developerAPI: true,
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TraceConfig: simulation.ExecTraceConfig{
+ Enable: true,
+ Stack: true,
+ Scratch: true,
+ },
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ },
+ },
+ AppBudgetConsumed: 5,
+ Trace: &simulation.TransactionTrace{
+ ApprovalProgramTrace: []simulation.OpcodeTraceUnit{
+ {
+ PC: 1,
+ },
+ {
+ PC: 4,
+ StackAdded: goValuesToTealValues(0),
+ },
+ {
+ PC: 6,
+ StackPopCount: 1,
+ },
+ {
+ PC: 90,
+ StackAdded: goValuesToTealValues(1),
+ },
+ {
+ PC: 91,
+ StackAdded: goValuesToTealValues(1),
+ StackPopCount: 1,
+ },
+ },
+ },
+ },
+ {
+ Trace: &simulation.TransactionTrace{},
+ },
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ EvalDelta: transactions.EvalDelta{
+ Logs: []string{
+ string(uint64ToBytes(uint64(applicationArg * 3))),
+ },
+ },
+ },
+ },
+ AppBudgetConsumed: 39,
+ Trace: &simulation.TransactionTrace{
+ ApprovalProgramTrace: []simulation.OpcodeTraceUnit{
+ {
+ PC: 1,
+ },
+ {
+ PC: 4,
+ StackAdded: goValuesToTealValues(futureAppID),
+ },
+ {
+ PC: 6,
+ StackPopCount: 1,
+ },
+ {
+ PC: 9,
+ StackAdded: goValuesToTealValues(1),
+ },
+ {
+ PC: 11,
+ StackAdded: goValuesToTealValues(1),
+ },
+ {
+ PC: 12,
+ StackAdded: goValuesToTealValues(1),
+ StackPopCount: 2,
+ },
+ {
+ PC: 13,
+ StackPopCount: 1,
+ },
+ {
+ PC: 14,
+ StackAdded: goValuesToTealValues([]byte{byte(applicationArg)}),
+ },
+ {
+ PC: 17,
+ StackAdded: goValuesToTealValues(applicationArg),
+ StackPopCount: 1,
+ },
+ // call sub
+ {
+ PC: 18,
+ },
+ // proto
+ {
+ PC: 26,
+ },
+ {
+ PC: 29,
+ StackAdded: goValuesToTealValues(0),
+ },
+ // dup
+ {
+ PC: 31,
+ StackAdded: goValuesToTealValues(0, 0),
+ StackPopCount: 1,
+ },
+ // dupn 4
+ {
+ PC: 32,
+ StackAdded: goValuesToTealValues(0, 0, 0, 0, 0),
+ StackPopCount: 1,
+ },
+ // frame_dig -1
+ {
+ PC: 34,
+ StackAdded: goValuesToTealValues(applicationArg),
+ StackPopCount: 0,
+ },
+ // frame_bury 0
+ {
+ PC: 36,
+ StackAdded: goValuesToTealValues(applicationArg, 0, 0, 0, 0, 0),
+ StackPopCount: 7,
+ },
+ // dig 5
+ {
+ PC: 38,
+ StackAdded: goValuesToTealValues(applicationArg),
+ StackPopCount: 0,
+ },
+ // cover 5
+ {
+ PC: 40,
+ StackAdded: goValuesToTealValues(applicationArg, 0, 0, 0, 0, 0),
+ StackPopCount: 6,
+ },
+ // frame_dig 0
+ {
+ PC: 42,
+ StackAdded: goValuesToTealValues(applicationArg),
+ StackPopCount: 0,
+ },
+ // frame_dig 1
+ {
+ PC: 44,
+ StackAdded: goValuesToTealValues(applicationArg),
+ StackPopCount: 0,
+ },
+ // +
+ {
+ PC: 46,
+ StackAdded: goValuesToTealValues(applicationArg * 2),
+ StackPopCount: 2,
+ },
+ // bury 7
+ {
+ PC: 47,
+ StackAdded: goValuesToTealValues(applicationArg*2, applicationArg, 0, 0, 0, 0, 0),
+ StackPopCount: 8,
+ },
+ // popn 5
+ {
+ PC: 49,
+ StackPopCount: 5,
+ },
+ // uncover 1
+ {
+ PC: 51,
+ StackPopCount: 2,
+ StackAdded: goValuesToTealValues(applicationArg, applicationArg*2),
+ },
+ // swap
+ {
+ PC: 53,
+ StackAdded: goValuesToTealValues(applicationArg*2, applicationArg),
+ StackPopCount: 2,
+ },
+ // +
+ {
+ PC: 54,
+ StackAdded: goValuesToTealValues(applicationArg * 3),
+ StackPopCount: 2,
+ },
+ // pushbytess "1!" "5!"
+ {
+ PC: 55,
+ StackAdded: goValuesToTealValues("1!", "5!"),
+ },
+ // pushints 0 2 1 1 5 18446744073709551615
+ {
+ PC: 63,
+ StackAdded: goValuesToTealValues(0, 2, 1, 1, 5, uint64(math.MaxUint64)),
+ },
+ // store 1
+ {
+ PC: 80,
+ StackPopCount: 1,
+ ScratchSlotChanges: []simulation.ScratchChange{
+ {
+ Slot: 1,
+ NewValue: goValuesToTealValues(uint64(math.MaxUint64))[0],
+ },
+ },
+ },
+ // load 1
+ {
+ PC: 82,
+ StackAdded: goValuesToTealValues(uint64(math.MaxUint64)),
+ },
+ // stores
+ {
+ PC: 84,
+ StackPopCount: 2,
+ ScratchSlotChanges: []simulation.ScratchChange{
+ {
+ Slot: 5,
+ NewValue: goValuesToTealValues(uint64(math.MaxUint64))[0],
+ },
+ },
+ },
+ // load 1
+ {
+ PC: 85,
+ StackAdded: goValuesToTealValues(uint64(math.MaxUint64)),
+ },
+ // store 1
+ {
+ PC: 87,
+ StackPopCount: 1,
+ ScratchSlotChanges: []simulation.ScratchChange{
+ {
+ Slot: 1,
+ NewValue: goValuesToTealValues(uint64(math.MaxUint64))[0],
+ },
+ },
+ },
+ // retsub
+ {
+ PC: 89,
+ StackAdded: goValuesToTealValues(applicationArg * 3),
+ StackPopCount: 8,
+ },
+ // itob
+ {
+ PC: 21,
+ StackAdded: goValuesToTealValues(uint64ToBytes(uint64(applicationArg) * 3)),
+ StackPopCount: 1,
+ },
+ // log
+ {
+ PC: 22,
+ StackPopCount: 1,
+ },
+ // b end
+ {
+ PC: 23,
+ },
+ // int 1
+ {
+ PC: 90,
+ StackAdded: goValuesToTealValues(1),
+ },
+ // return
+ {
+ PC: 91,
+ StackAdded: goValuesToTealValues(1),
+ StackPopCount: 1,
+ },
+ },
+ },
+ },
+ },
+ AppBudgetAdded: 1400,
+ AppBudgetConsumed: 44,
+ },
+ },
+ },
+ }
+ })
+}
+
// TestBalanceChangesWithApp sends a payment transaction to a new account and confirms its balance
// within a subsequent app call
func TestBalanceChangesWithApp(t *testing.T) {
@@ -1634,7 +3023,7 @@ func TestOptionalSignaturesIncorrect(t *testing.T) {
env := simulationtesting.PrepareSimulatorTest(t)
defer env.Close()
- s := simulation.MakeSimulator(env.Ledger)
+ s := simulation.MakeSimulator(env.Ledger, false)
sender := env.Accounts[0]
stxn := env.TxnInfo.NewTxn(txntest.Txn{
diff --git a/ledger/simulation/simulator.go b/ledger/simulation/simulator.go
index 1bcd3a5ba..d11cf845b 100644
--- a/ledger/simulation/simulator.go
+++ b/ledger/simulation/simulator.go
@@ -43,6 +43,7 @@ type Request struct {
AllowEmptySignatures bool
AllowMoreLogging bool
ExtraOpcodeBudget uint64
+ TraceConfig ExecTraceConfig
}
// Latest is part of the LedgerForSimulator interface.
@@ -85,13 +86,15 @@ type EvalFailureError struct {
// Simulator is a transaction group simulator for the block evaluator.
type Simulator struct {
- ledger simulatorLedger
+ ledger simulatorLedger
+ developerAPI bool
}
// MakeSimulator creates a new simulator from a ledger.
-func MakeSimulator(ledger *data.Ledger) *Simulator {
+func MakeSimulator(ledger *data.Ledger, developerAPI bool) *Simulator {
return &Simulator{
- ledger: simulatorLedger{ledger, ledger.Latest()},
+ ledger: simulatorLedger{ledger, ledger.Latest()},
+ developerAPI: developerAPI,
}
}
@@ -192,14 +195,13 @@ func (s Simulator) simulateWithTracer(txgroup []transactions.SignedTxn, tracer l
// check that the extra budget is not exceeding simulation extra budget limit
if overrides.ExtraOpcodeBudget > MaxExtraOpcodeBudget {
- return nil,
- InvalidRequestError{
- SimulatorError{
- fmt.Errorf(
- "extra budget %d > simulation extra budget limit %d",
- overrides.ExtraOpcodeBudget, MaxExtraOpcodeBudget),
- },
- }
+ return nil, InvalidRequestError{
+ SimulatorError{
+ fmt.Errorf(
+ "extra budget %d > simulation extra budget limit %d",
+ overrides.ExtraOpcodeBudget, MaxExtraOpcodeBudget),
+ },
+ }
}
vb, err := s.evaluate(hdr, txgroup, tracer)
@@ -208,7 +210,10 @@ func (s Simulator) simulateWithTracer(txgroup []transactions.SignedTxn, tracer l
// Simulate simulates a transaction group using the simulator. Will error if the transaction group is not well-formed.
func (s Simulator) Simulate(simulateRequest Request) (Result, error) {
- simulatorTracer := makeEvalTracer(s.ledger.start, simulateRequest)
+ simulatorTracer, err := makeEvalTracer(s.ledger.start, simulateRequest, s.developerAPI)
+ if err != nil {
+ return Result{}, err
+ }
if len(simulateRequest.TxnGroups) != 1 {
return Result{}, InvalidRequestError{
diff --git a/ledger/simulation/simulator_test.go b/ledger/simulation/simulator_test.go
index 90eada3f4..7a28d951f 100644
--- a/ledger/simulation/simulator_test.go
+++ b/ledger/simulation/simulator_test.go
@@ -108,7 +108,7 @@ func TestSimulateWithTrace(t *testing.T) {
env := simulationtesting.PrepareSimulatorTest(t)
defer env.Close()
- s := MakeSimulator(env.Ledger)
+ s := MakeSimulator(env.Ledger, false)
sender := env.Accounts[0]
op, err := logic.AssembleString(`#pragma version 8
diff --git a/ledger/simulation/trace.go b/ledger/simulation/trace.go
index 1c112928c..574836097 100644
--- a/ledger/simulation/trace.go
+++ b/ledger/simulation/trace.go
@@ -35,6 +35,7 @@ type TxnResult struct {
Txn transactions.SignedTxnWithAD
AppBudgetConsumed uint64
LogicSigBudgetConsumed uint64
+ Trace *TransactionTrace
}
// TxnGroupResult contains the simulation result for a single transaction group
@@ -103,6 +104,15 @@ func (eo ResultEvalOverrides) LogicEvalConstants() logic.EvalConstants {
return logicEvalConstants
}
+// ExecTraceConfig gathers all execution trace related configs for simulation result
+type ExecTraceConfig struct {
+ _struct struct{} `codec:",omitempty"`
+
+ Enable bool `codec:"enable"`
+ Stack bool `codec:"stack-change"`
+ Scratch bool `codec:"scratch-change"`
+}
+
// Result contains the result from a call to Simulator.Simulate
type Result struct {
Version uint64
@@ -110,13 +120,51 @@ type Result struct {
TxnGroups []TxnGroupResult // this is a list so that supporting multiple in the future is not breaking
EvalOverrides ResultEvalOverrides
Block *ledgercore.ValidatedBlock
+ TraceConfig ExecTraceConfig
}
-func makeSimulationResultWithVersion(lastRound basics.Round, request Request, version uint64) (Result, error) {
- if version != ResultLatestVersion {
- return Result{}, fmt.Errorf("invalid SimulationResult version: %d", version)
+// ReturnTrace reads from the Result object and decides if the simulation returns PC.
+// It only reads Enable, because any valid option combination must contain the Enable field, or it won't make sense.
+// The other invalid option combinations are eliminated early in validateSimulateRequest.
+func (r Result) ReturnTrace() bool { return r.TraceConfig.Enable }
+
+// ReturnStackChange reads from Result object and decides if simulation return stack changes.
+func (r Result) ReturnStackChange() bool { return r.TraceConfig.Stack }
+
+// ReturnScratchChange tells if the simulation runs with scratch-change enabled.
+func (r Result) ReturnScratchChange() bool { return r.TraceConfig.Scratch }
+
+// validateSimulateRequest first checks relation between request and config variables, including developerAPI:
+// if `developerAPI` provided is turned off, this method would:
+// - error on asking for exec trace
+func validateSimulateRequest(request Request, developerAPI bool) error {
+ if !developerAPI && request.TraceConfig.Enable {
+ return InvalidRequestError{
+ SimulatorError{
+ err: fmt.Errorf("the local configuration of the node has `EnableDeveloperAPI` turned off, while requesting for execution trace"),
+ },
+ }
}
+ if !request.TraceConfig.Enable {
+ if request.TraceConfig.Stack {
+ return InvalidRequestError{
+ SimulatorError{
+ err: fmt.Errorf("basic trace must be enabled when enabling stack tracing"),
+ },
+ }
+ }
+ if request.TraceConfig.Scratch {
+ return InvalidRequestError{
+ SimulatorError{
+ err: fmt.Errorf("basic trace must be enabled when enabling scratch slot change tracing"),
+ },
+ }
+ }
+ }
+ return nil
+}
+func makeSimulationResult(lastRound basics.Round, request Request, developerAPI bool) (Result, error) {
groups := make([]TxnGroupResult, len(request.TxnGroups))
for i, txgroup := range request.TxnGroups {
@@ -128,19 +176,60 @@ func makeSimulationResultWithVersion(lastRound basics.Round, request Request, ve
ExtraOpcodeBudget: request.ExtraOpcodeBudget,
}.AllowMoreLogging(request.AllowMoreLogging)
+ if err := validateSimulateRequest(request, developerAPI); err != nil {
+ return Result{}, err
+ }
+
return Result{
- Version: version,
+ Version: ResultLatestVersion,
LastRound: lastRound,
TxnGroups: groups,
EvalOverrides: resultEvalConstants,
+ TraceConfig: request.TraceConfig,
}, nil
}
-func makeSimulationResult(lastRound basics.Round, request Request) Result {
- result, err := makeSimulationResultWithVersion(lastRound, request, ResultLatestVersion)
- if err != nil {
- // this should never happen, since we pass in ResultLatestVersion
- panic(err)
- }
- return result
+// ScratchChange represents a write operation into a scratch slot
+type ScratchChange struct {
+ // Slot stands for the scratch slot id get written to
+ Slot uint64
+
+ // NewValue is the stack value written to scratch slot
+ NewValue basics.TealValue
+}
+
+// OpcodeTraceUnit contains the trace effects of a single opcode evaluation.
+type OpcodeTraceUnit struct {
+ // The PC of the opcode being evaluated
+ PC uint64
+
+ // SpawnedInners contains the indexes of traces for inner transactions spawned by this opcode,
+ // if any. These indexes refer to the InnerTraces array of the TransactionTrace object containing
+ // this OpcodeTraceUnit.
+ SpawnedInners []int
+
+	// what has been added to the stack
+ StackAdded []basics.TealValue
+
+	// the number of elements deleted from the stack
+ StackPopCount uint64
+
+ // ScratchSlotChanges stands for write operations into scratch slots
+ ScratchSlotChanges []ScratchChange
+}
+
+// TransactionTrace contains the trace effects of a single transaction evaluation (including its inners)
+type TransactionTrace struct {
+ // ApprovalProgramTrace stands for a slice of OpcodeTraceUnit over application call on approval program
+ ApprovalProgramTrace []OpcodeTraceUnit
+ // ClearStateProgramTrace stands for a slice of OpcodeTraceUnit over application call on clear-state program
+ ClearStateProgramTrace []OpcodeTraceUnit
+ // LogicSigTrace contains the trace for a logicsig evaluation, if the transaction is approved by a logicsig.
+ LogicSigTrace []OpcodeTraceUnit
+ // programTraceRef points to one of ApprovalProgramTrace, ClearStateProgramTrace, and LogicSigTrace during simulation.
+ programTraceRef *[]OpcodeTraceUnit
+ // InnerTraces contains the traces for inner transactions, if this transaction spawned any. This
+ // object only contains traces for inners that are immediate children of this transaction.
+ // Grandchild traces will be present inside the TransactionTrace of their parent.
+ InnerTraces []TransactionTrace
}
diff --git a/ledger/simulation/tracer.go b/ledger/simulation/tracer.go
index 2b30a2e7f..b56e13e42 100644
--- a/ledger/simulation/tracer.go
+++ b/ledger/simulation/tracer.go
@@ -23,6 +23,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/protocol"
)
// cursorEvalTracer is responsible for maintaining a TxnPath that points to the currently executing
@@ -82,11 +83,31 @@ type evalTracer struct {
result *Result
failedAt TxnPath
+
+ // execTraceStack keeps track of the call stack:
+ // from top level transaction to the current inner txn that contains latest TransactionTrace.
+ // NOTE: execTraceStack is used only for PC/Stack/Storage exposure.
+ execTraceStack []*TransactionTrace
+
+ // addCount and popCount keep track of the latest opcode change explanation from opcode.
+ addCount int
+ popCount int
+
+	// stackHeightAfterDeletion is the stack height before the opcode minus the number of deleted stack elements.
+	// NOTE: stackHeightAfterDeletion is used only for Stack exposure.
+ stackHeightAfterDeletion int
+
+ // scratchSlots are the scratch slots changed on current opcode (currently either `store` or `stores`).
+ // NOTE: this field scratchSlots is used only for scratch change exposure.
+ scratchSlots []uint64
}
-func makeEvalTracer(lastRound basics.Round, request Request) *evalTracer {
- result := makeSimulationResult(lastRound, request)
- return &evalTracer{result: &result}
+func makeEvalTracer(lastRound basics.Round, request Request, developerAPI bool) (*evalTracer, error) {
+ result, err := makeSimulationResult(lastRound, request, developerAPI)
+ if err != nil {
+ return nil, err
+ }
+ return &evalTracer{result: &result}, nil
}
func (tracer *evalTracer) handleError(evalError error) {
@@ -163,9 +184,63 @@ func (tracer *evalTracer) saveApplyData(applyData transactions.ApplyData) {
applyDataOfCurrentTxn.EvalDelta = evalDelta
}
+func (tracer *evalTracer) BeforeTxn(ep *logic.EvalParams, groupIndex int) {
+ if tracer.result.ReturnTrace() {
+ var txnTraceStackElem *TransactionTrace
+
+ // Where should the current transaction trace attach to:
+ // - if it is a top level transaction, then attach to TxnResult level
+ // - if it is an inner transaction, then refer to the stack for latest exec trace,
+ // and attach to inner array
+ if len(tracer.execTraceStack) == 0 {
+ // to adapt to logic sig trace here, we separate into 2 cases:
+ // - if we already executed `Before/After-Program`,
+ // then there should be a trace containing logic sig.
+ // We should add the transaction type to the pre-existing execution trace.
+ // - otherwise, we take the simplest trace with transaction type.
+ if tracer.result.TxnGroups[0].Txns[groupIndex].Trace == nil {
+ tracer.result.TxnGroups[0].Txns[groupIndex].Trace = &TransactionTrace{}
+ }
+ txnTraceStackElem = tracer.result.TxnGroups[0].Txns[groupIndex].Trace
+ } else {
+ // we are reaching inner txns, so we don't have to be concerned about logic sig trace here
+ lastExecTrace := tracer.execTraceStack[len(tracer.execTraceStack)-1]
+ lastExecTrace.InnerTraces = append(lastExecTrace.InnerTraces, TransactionTrace{})
+ txnTraceStackElem = &lastExecTrace.InnerTraces[len(lastExecTrace.InnerTraces)-1]
+
+ innerIndex := len(lastExecTrace.InnerTraces) - 1
+ parentOpIndex := len(*lastExecTrace.programTraceRef) - 1
+
+ parentOp := &(*lastExecTrace.programTraceRef)[parentOpIndex]
+ parentOp.SpawnedInners = append(parentOp.SpawnedInners, innerIndex)
+ }
+
+ currentTxn := ep.TxnGroup[groupIndex]
+ if currentTxn.Txn.Type == protocol.ApplicationCallTx {
+ switch currentTxn.Txn.ApplicationCallTxnFields.OnCompletion {
+ case transactions.ClearStateOC:
+ txnTraceStackElem.programTraceRef = &txnTraceStackElem.ClearStateProgramTrace
+ default:
+ txnTraceStackElem.programTraceRef = &txnTraceStackElem.ApprovalProgramTrace
+ }
+ }
+
+	// In both cases, we need to add the transaction trace to the stack
+ tracer.execTraceStack = append(tracer.execTraceStack, txnTraceStackElem)
+ }
+ tracer.cursorEvalTracer.BeforeTxn(ep, groupIndex)
+}
+
func (tracer *evalTracer) AfterTxn(ep *logic.EvalParams, groupIndex int, ad transactions.ApplyData, evalError error) {
tracer.handleError(evalError)
tracer.saveApplyData(ad)
+	// if the current transaction + simulation condition led to exec trace creation,
+	// we should clean it up from tracer.execTraceStack.
+ if tracer.result.ReturnTrace() {
+ lastOne := tracer.execTraceStack[len(tracer.execTraceStack)-1]
+ lastOne.programTraceRef = nil
+ tracer.execTraceStack = tracer.execTraceStack[:len(tracer.execTraceStack)-1]
+ }
tracer.cursorEvalTracer.AfterTxn(ep, groupIndex, ad, evalError)
}
@@ -178,21 +253,120 @@ func (tracer *evalTracer) saveEvalDelta(evalDelta transactions.EvalDelta, appIDT
applyDataOfCurrentTxn.EvalDelta.InnerTxns = inners
}
+func (tracer *evalTracer) makeOpcodeTraceUnit(cx *logic.EvalContext) OpcodeTraceUnit {
+ return OpcodeTraceUnit{PC: uint64(cx.PC())}
+}
+
+func (o *OpcodeTraceUnit) computeStackValueDeletions(cx *logic.EvalContext, tracer *evalTracer) {
+ tracer.popCount, tracer.addCount = cx.GetOpSpec().Explain(cx)
+ o.StackPopCount = uint64(tracer.popCount)
+
+ stackHeight := len(cx.Stack)
+ tracer.stackHeightAfterDeletion = stackHeight - int(o.StackPopCount)
+}
+
func (tracer *evalTracer) BeforeOpcode(cx *logic.EvalContext) {
- if cx.RunMode() != logic.ModeApp {
- // do nothing for LogicSig ops
- return
+ groupIndex := cx.GroupIndex()
+
+ if cx.RunMode() == logic.ModeApp {
+ // Remember app EvalDelta before executing the opcode. We do this
+ // because if this opcode fails, the block evaluator resets the EvalDelta.
+ var appIDToSave basics.AppIndex
+ if cx.TxnGroup[groupIndex].SignedTxn.Txn.ApplicationID == 0 {
+ // App creation
+ appIDToSave = cx.AppID()
+ }
+ tracer.saveEvalDelta(cx.TxnGroup[groupIndex].EvalDelta, appIDToSave)
+ }
+
+ if tracer.result.ReturnTrace() {
+ var txnTrace *TransactionTrace
+ if cx.RunMode() == logic.ModeSig {
+ txnTrace = tracer.result.TxnGroups[0].Txns[groupIndex].Trace
+ } else {
+ txnTrace = tracer.execTraceStack[len(tracer.execTraceStack)-1]
+ }
+ *txnTrace.programTraceRef = append(*txnTrace.programTraceRef, tracer.makeOpcodeTraceUnit(cx))
+
+ latestOpcodeTraceUnit := &(*txnTrace.programTraceRef)[len(*txnTrace.programTraceRef)-1]
+ if tracer.result.ReturnStackChange() {
+ latestOpcodeTraceUnit.computeStackValueDeletions(cx, tracer)
+ }
+ if tracer.result.ReturnScratchChange() {
+ tracer.recordChangedScratchSlots(cx)
+ }
+ }
+}
+
+func (o *OpcodeTraceUnit) appendAddedStackValue(cx *logic.EvalContext, tracer *evalTracer) {
+ for i := tracer.stackHeightAfterDeletion; i < len(cx.Stack); i++ {
+ tealValue := cx.Stack[i].ToTealValue()
+ o.StackAdded = append(o.StackAdded, basics.TealValue{
+ Type: tealValue.Type,
+ Uint: tealValue.Uint,
+ Bytes: tealValue.Bytes,
+ })
+ }
+}
+
+func (tracer *evalTracer) recordChangedScratchSlots(cx *logic.EvalContext) {
+ currentOpcodeName := cx.GetOpSpec().Name
+ last := len(cx.Stack) - 1
+ tracer.scratchSlots = nil
+
+ switch currentOpcodeName {
+ case "store":
+ slot := uint64(cx.GetProgram()[cx.PC()+1])
+ tracer.scratchSlots = append(tracer.scratchSlots, slot)
+ case "stores":
+ prev := last - 1
+ slot := cx.Stack[prev].Uint
+
+		// If something goes wrong for `stores`, we don't have to error here,
+		// because the runtime will already have an evalError
+ if slot >= uint64(len(cx.Scratch)) {
+ return
+ }
+ tracer.scratchSlots = append(tracer.scratchSlots, slot)
}
- groupIndex := tracer.relativeGroupIndex()
- var appIDToSave basics.AppIndex
- if cx.TxnGroup[groupIndex].SignedTxn.Txn.ApplicationID == 0 {
- // app creation
- appIDToSave = cx.AppID()
+}
+
+func (tracer *evalTracer) recordUpdatedScratchVars(cx *logic.EvalContext) []ScratchChange {
+ if len(tracer.scratchSlots) == 0 {
+ return nil
}
- tracer.saveEvalDelta(cx.TxnGroup[groupIndex].EvalDelta, appIDToSave)
+ changes := make([]ScratchChange, len(tracer.scratchSlots))
+ for i, slot := range tracer.scratchSlots {
+ changes[i] = ScratchChange{
+ Slot: slot,
+ NewValue: cx.Scratch[slot].ToTealValue(),
+ }
+ }
+ return changes
}
func (tracer *evalTracer) AfterOpcode(cx *logic.EvalContext, evalError error) {
+ groupIndex := cx.GroupIndex()
+
+	// NOTE: we can only proceed with recording the stack change
+	// when the current opcode produced no evalError.
+ if evalError == nil && tracer.result.ReturnTrace() {
+ var txnTrace *TransactionTrace
+ if cx.RunMode() == logic.ModeSig {
+ txnTrace = tracer.result.TxnGroups[0].Txns[groupIndex].Trace
+ } else {
+ txnTrace = tracer.execTraceStack[len(tracer.execTraceStack)-1]
+ }
+
+ latestOpcodeTraceUnit := &(*txnTrace.programTraceRef)[len(*txnTrace.programTraceRef)-1]
+ if tracer.result.ReturnStackChange() {
+ latestOpcodeTraceUnit.appendAddedStackValue(cx, tracer)
+ }
+ if tracer.result.ReturnScratchChange() {
+ latestOpcodeTraceUnit.ScratchSlotChanges = tracer.recordUpdatedScratchVars(cx)
+ }
+ }
+
if cx.RunMode() != logic.ModeApp {
// do nothing for LogicSig ops
return
@@ -200,10 +374,29 @@ func (tracer *evalTracer) AfterOpcode(cx *logic.EvalContext, evalError error) {
tracer.handleError(evalError)
}
+func (tracer *evalTracer) BeforeProgram(cx *logic.EvalContext) {
+ groupIndex := cx.GroupIndex()
+
+	// BeforeProgram, when triggered for a logic sig, runs before txn group execution,
+	// so we should create the trace object for this txn's result here.
+ if cx.RunMode() != logic.ModeApp {
+ if tracer.result.ReturnTrace() {
+ tracer.result.TxnGroups[0].Txns[groupIndex].Trace = &TransactionTrace{}
+ traceRef := tracer.result.TxnGroups[0].Txns[groupIndex].Trace
+ traceRef.programTraceRef = &traceRef.LogicSigTrace
+ }
+ }
+}
+
func (tracer *evalTracer) AfterProgram(cx *logic.EvalContext, evalError error) {
+ groupIndex := cx.GroupIndex()
+
if cx.RunMode() != logic.ModeApp {
// Report cost for LogicSig program and exit
- tracer.result.TxnGroups[0].Txns[cx.GroupIndex()].LogicSigBudgetConsumed = uint64(cx.Cost())
+ tracer.result.TxnGroups[0].Txns[groupIndex].LogicSigBudgetConsumed = uint64(cx.Cost())
+ if tracer.result.ReturnTrace() {
+ tracer.result.TxnGroups[0].Txns[groupIndex].Trace.programTraceRef = nil
+ }
return
}
diff --git a/ledger/spverificationtracker.go b/ledger/spverificationtracker.go
index d98974897..e3d3c214e 100644
--- a/ledger/spverificationtracker.go
+++ b/ledger/spverificationtracker.go
@@ -134,7 +134,7 @@ func (spt *spVerificationTracker) commitRound(ctx context.Context, tx trackerdb.
}
if dcc.spVerification.lastDeleteIndex >= 0 {
- err = tx.MakeSpVerificationCtxReaderWriter().DeleteOldSPContexts(ctx, dcc.spVerification.earliestLastAttestedRound)
+ err = tx.MakeSpVerificationCtxWriter().DeleteOldSPContexts(ctx, dcc.spVerification.earliestLastAttestedRound)
}
return err
@@ -146,7 +146,7 @@ func commitSPContexts(ctx context.Context, tx trackerdb.TransactionScope, commit
ptrToCtxs[i] = &commitData[i].verificationContext
}
- return tx.MakeSpVerificationCtxReaderWriter().StoreSPContexts(ctx, ptrToCtxs)
+ return tx.MakeSpVerificationCtxWriter().StoreSPContexts(ctx, ptrToCtxs)
}
func (spt *spVerificationTracker) postCommit(_ context.Context, dcc *deferredCommitContext) {
@@ -226,8 +226,8 @@ func (spt *spVerificationTracker) lookupContextInTrackedMemory(stateProofLastAtt
}
}
- return &ledgercore.StateProofVerificationContext{}, fmt.Errorf("%w for round %d: memory lookup failed",
- errSPVerificationContextNotFound, stateProofLastAttestedRound)
+ return &ledgercore.StateProofVerificationContext{}, fmt.Errorf("%w for round %d: memory lookup failed (pending len %d)",
+ errSPVerificationContextNotFound, stateProofLastAttestedRound, len(spt.pendingCommitContexts))
}
func (spt *spVerificationTracker) lookupContextInDB(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) {
diff --git a/ledger/spverificationtracker_test.go b/ledger/spverificationtracker_test.go
index e0f073fe4..a88c05f0e 100644
--- a/ledger/spverificationtracker_test.go
+++ b/ledger/spverificationtracker_test.go
@@ -424,7 +424,7 @@ func TestStateProofVerificationTracker_LookupVerificationContext(t *testing.T) {
_, err := spt.LookupVerificationContext(basics.Round(0))
a.ErrorIs(err, errSPVerificationContextNotFound)
- a.ErrorContains(err, "no rows")
+ a.ErrorContains(err, "not found")
finalLastAttestedRound := basics.Round(defaultStateProofInterval + contextToAdd*defaultStateProofInterval)
_, err = spt.LookupVerificationContext(finalLastAttestedRound + basics.Round(defaultStateProofInterval))
diff --git a/ledger/store/merkle_committer.go b/ledger/store/merkle_committer.go
deleted file mode 100644
index bc7502dac..000000000
--- a/ledger/store/merkle_committer.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (C) 2019-2023 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package store
-
-import "database/sql"
-
-// MerkleCommitter allows storing and loading merkletrie pages from a sqlite database.
-//
-//msgp:ignore MerkleCommitter
-type MerkleCommitter struct {
- tx *sql.Tx
- deleteStmt *sql.Stmt
- insertStmt *sql.Stmt
- selectStmt *sql.Stmt
-}
-
-// MakeMerkleCommitter creates a MerkleCommitter object that implements the merkletrie.Committer interface allowing storing and loading
-// merkletrie pages from a sqlite database.
-func MakeMerkleCommitter(tx *sql.Tx, staging bool) (mc *MerkleCommitter, err error) {
- mc = &MerkleCommitter{tx: tx}
- accountHashesTable := "accounthashes"
- if staging {
- accountHashesTable = "catchpointaccounthashes"
- }
- mc.deleteStmt, err = tx.Prepare("DELETE FROM " + accountHashesTable + " WHERE id=?")
- if err != nil {
- return nil, err
- }
- mc.insertStmt, err = tx.Prepare("INSERT OR REPLACE INTO " + accountHashesTable + "(id, data) VALUES(?, ?)")
- if err != nil {
- return nil, err
- }
- mc.selectStmt, err = tx.Prepare("SELECT data FROM " + accountHashesTable + " WHERE id = ?")
- if err != nil {
- return nil, err
- }
- return mc, nil
-}
-
-// StorePage is the merkletrie.Committer interface implementation, stores a single page in a sqlite database table.
-func (mc *MerkleCommitter) StorePage(page uint64, content []byte) error {
- if len(content) == 0 {
- _, err := mc.deleteStmt.Exec(page)
- return err
- }
- _, err := mc.insertStmt.Exec(page, content)
- return err
-}
-
-// LoadPage is the merkletrie.Committer interface implementation, load a single page from a sqlite database table.
-func (mc *MerkleCommitter) LoadPage(page uint64) (content []byte, err error) {
- err = mc.selectStmt.QueryRow(page).Scan(&content)
- if err == sql.ErrNoRows {
- content = nil
- err = nil
- return
- } else if err != nil {
- return nil, err
- }
- return content, nil
-}
diff --git a/ledger/store/testing/helpers.go b/ledger/store/testing/helpers.go
index 34ba3e3ff..b9e41ee28 100644
--- a/ledger/store/testing/helpers.go
+++ b/ledger/store/testing/helpers.go
@@ -29,7 +29,7 @@ import (
// DbOpenTest opens a db file for testing purposes.
func DbOpenTest(t testing.TB, inMemory bool) (db.Pair, string) {
- fn := fmt.Sprintf("%s.%d", strings.ReplaceAll(t.Name(), "/", "."), crypto.RandUint64())
+ fn := fmt.Sprintf("%s/%s.%d", t.TempDir(), strings.ReplaceAll(t.Name(), "/", "."), crypto.RandUint64())
dbs, err := db.OpenPair(fn, inMemory)
require.NoErrorf(t, err, "Filename : %s\nInMemory: %v", fn, inMemory)
return dbs, fn
diff --git a/ledger/store/trackerdb/catchpoint.go b/ledger/store/trackerdb/catchpoint.go
index dfbacbd86..7c67155d4 100644
--- a/ledger/store/trackerdb/catchpoint.go
+++ b/ledger/store/trackerdb/catchpoint.go
@@ -151,10 +151,7 @@ func MakeCatchpointFilePath(round basics.Round) string {
func RemoveSingleCatchpointFileFromDisk(dbDirectory, fileToDelete string) (err error) {
absCatchpointFileName := filepath.Join(dbDirectory, fileToDelete)
err = os.Remove(absCatchpointFileName)
- if err == nil || os.IsNotExist(err) {
- // it's ok if the file doesn't exist.
- err = nil
- } else {
+ if err != nil && !os.IsNotExist(err) {
// we can't delete the file, abort -
return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
}
diff --git a/ledger/store/trackerdb/data_test.go b/ledger/store/trackerdb/data_test.go
index 5bcd321aa..fe253304f 100644
--- a/ledger/store/trackerdb/data_test.go
+++ b/ledger/store/trackerdb/data_test.go
@@ -1079,6 +1079,8 @@ func TestLedgercoreAccountDataRoundtripConversion(t *testing.T) {
func TestBaseAccountDataIsEmpty(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
+
positiveTesting := func(t *testing.T) {
var ba BaseAccountData
require.True(t, ba.IsEmpty())
@@ -1110,11 +1112,11 @@ func TestBaseAccountDataIsEmpty(t *testing.T) {
t.Run("Positive", positiveTesting)
t.Run("Negative", negativeTesting)
t.Run("Structure", structureTesting)
-
}
func TestBaseOnlineAccountDataIsEmpty(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
positiveTesting := func(t *testing.T) {
var ba BaseOnlineAccountData
@@ -1162,6 +1164,7 @@ func TestBaseOnlineAccountDataIsEmpty(t *testing.T) {
func TestBaseOnlineAccountDataGettersSetters(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
addr := ledgertesting.RandomAddress()
@@ -1216,6 +1219,7 @@ func TestBaseOnlineAccountDataGettersSetters(t *testing.T) {
func TestBaseVotingDataGettersSetters(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
data := ledgertesting.RandomAccountData(1)
data.Status = basics.Online
@@ -1243,12 +1247,31 @@ func TestBaseVotingDataGettersSetters(t *testing.T) {
func TestBaseOnlineAccountDataReflect(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
require.Equal(t, 4, reflect.TypeOf(BaseOnlineAccountData{}).NumField(), "update all getters and setters for baseOnlineAccountData and change the field count")
}
func TestBaseVotingDataReflect(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
require.Equal(t, 7, reflect.TypeOf(BaseVotingData{}).NumField(), "update all getters and setters for baseVotingData and change the field count")
}
+
+// TestBaseAccountDataDecodeEmpty ensures no surprises when decoding nil/empty data.
+func TestBaseAccountDataDecodeEmpty(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ var b BaseAccountData
+
+ err := protocol.Decode([]byte{}, &b)
+ require.Error(t, err)
+
+ err = protocol.Decode(nil, &b)
+ require.Error(t, err)
+
+ err = protocol.Decode([]byte{0x80}, &b)
+ require.NoError(t, err)
+}
diff --git a/ledger/store/trackerdb/interface.go b/ledger/store/trackerdb/interface.go
index 9e9fbb1a1..2ddfa2020 100644
--- a/ledger/store/trackerdb/interface.go
+++ b/ledger/store/trackerdb/interface.go
@@ -18,6 +18,7 @@ package trackerdb
import (
"context"
+ "errors"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -26,6 +27,9 @@ import (
"github.com/algorand/go-algorand/ledger/ledgercore"
)
+// ErrNotFound is returned when a record is not found.
+var ErrNotFound = errors.New("trackerdb: not found")
+
// AccountRef is an opaque ref to an account in the db.
type AccountRef interface {
AccountRefMarker()
@@ -82,8 +86,6 @@ type AccountsWriterExt interface {
// AccountsReader is the "optimized" read interface for:
// - accounts, resources, app kvs, creatables
type AccountsReader interface {
- ListCreatables(maxIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) (results []basics.CreatableLocator, dbRound basics.Round, err error)
-
LookupAccount(addr basics.Address) (data PersistedAccountData, err error)
LookupResources(addr basics.Address, aidx basics.CreatableIndex, ctype basics.CreatableType) (data PersistedResourcesData, err error)
@@ -103,7 +105,6 @@ type AccountsReaderExt interface {
AccountsTotals(ctx context.Context, catchpointStaging bool) (totals ledgercore.AccountTotals, err error)
AccountsHashRound(ctx context.Context) (hashrnd basics.Round, err error)
LookupAccountAddressFromAddressID(ctx context.Context, ref AccountRef) (address basics.Address, err error)
- LookupAccountDataByAddress(basics.Address) (ref AccountRef, data []byte, err error)
LookupAccountRowID(basics.Address) (ref AccountRef, err error)
LookupResourceDataByAddrID(accountRef AccountRef, aidx basics.CreatableIndex) (data []byte, err error)
TotalResources(ctx context.Context) (total uint64, err error)
@@ -117,7 +118,8 @@ type AccountsReaderExt interface {
OnlineAccountsAll(maxAccounts uint64) ([]PersistedOnlineAccountData, error)
LoadTxTail(ctx context.Context, dbRound basics.Round) (roundData []*TxTailRound, roundHash []crypto.Digest, baseRound basics.Round, err error)
LoadAllFullAccounts(ctx context.Context, balancesTable string, resourcesTable string, acctCb func(basics.Address, basics.AccountData)) (count int, err error)
- Testing() TestAccountsReaderExt
+ // testing
+ Testing() AccountsReaderTestExt
}
// AccountsReaderWriter is AccountsReader+AccountsWriter
@@ -140,7 +142,7 @@ type OnlineAccountsWriter interface {
// - online accounts
type OnlineAccountsReader interface {
LookupOnline(addr basics.Address, rnd basics.Round) (data PersistedOnlineAccountData, err error)
- LookupOnlineTotalsHistory(round basics.Round) (basics.MicroAlgos, error)
+ LookupOnlineRoundParams(rnd basics.Round) (onlineRoundParamsData ledgercore.OnlineRoundParamsData, err error)
LookupOnlineHistory(addr basics.Address) (result []PersistedOnlineAccountData, rnd basics.Round, err error)
Close()
diff --git a/ledger/store/trackerdb/msgp_gen.go b/ledger/store/trackerdb/msgp_gen.go
index e6fa865d6..203fd9fdb 100644
--- a/ledger/store/trackerdb/msgp_gen.go
+++ b/ledger/store/trackerdb/msgp_gen.go
@@ -6,8 +6,12 @@ import (
"github.com/algorand/msgp/msgp"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
)
// The following msgp objects are implemented in this file:
@@ -18,6 +22,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> BaseAccountDataMaxSize()
//
// BaseOnlineAccountData
// |-----> (*) MarshalMsg
@@ -26,6 +31,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> BaseOnlineAccountDataMaxSize()
//
// BaseVotingData
// |-----> (*) MarshalMsg
@@ -34,6 +40,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> BaseVotingDataMaxSize()
//
// CatchpointFirstStageInfo
// |-----> (*) MarshalMsg
@@ -42,6 +49,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> CatchpointFirstStageInfoMaxSize()
//
// ResourceFlags
// |-----> MarshalMsg
@@ -50,6 +58,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> ResourceFlagsMaxSize()
//
// ResourcesData
// |-----> (*) MarshalMsg
@@ -58,6 +67,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> ResourcesDataMaxSize()
//
// TxTailRound
// |-----> (*) MarshalMsg
@@ -66,6 +76,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> TxTailRoundMaxSize()
//
// TxTailRoundLease
// |-----> (*) MarshalMsg
@@ -74,6 +85,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> TxTailRoundLeaseMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -642,6 +654,12 @@ func (z *BaseAccountData) MsgIsZero() bool {
return ((*z).Status.MsgIsZero()) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) && ((*z).RewardedMicroAlgos.MsgIsZero()) && ((*z).AuthAddr.MsgIsZero()) && ((*z).TotalAppSchemaNumUint == 0) && ((*z).TotalAppSchemaNumByteSlice == 0) && ((*z).TotalExtraAppPages == 0) && ((*z).TotalAssetParams == 0) && ((*z).TotalAssets == 0) && ((*z).TotalAppParams == 0) && ((*z).TotalAppLocalStates == 0) && ((*z).TotalBoxes == 0) && ((*z).TotalBoxBytes == 0) && ((*z).BaseVotingData.VoteID.MsgIsZero()) && ((*z).BaseVotingData.SelectionID.MsgIsZero()) && ((*z).BaseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).BaseVotingData.VoteLastValid.MsgIsZero()) && ((*z).BaseVotingData.VoteKeyDilution == 0) && ((*z).BaseVotingData.StateProofID.MsgIsZero()) && ((*z).UpdateRound == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func BaseAccountDataMaxSize() (s int) {
+ s = 3 + 2 + basics.StatusMaxSize() + 2 + basics.MicroAlgosMaxSize() + 2 + msgp.Uint64Size + 2 + basics.MicroAlgosMaxSize() + 2 + basics.AddressMaxSize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + crypto.OneTimeSignatureVerifierMaxSize() + 2 + crypto.VRFVerifierMaxSize() + 2 + basics.RoundMaxSize() + 2 + basics.RoundMaxSize() + 2 + msgp.Uint64Size + 2 + merklesignature.CommitmentMaxSize() + 2 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *BaseOnlineAccountData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -909,6 +927,12 @@ func (z *BaseOnlineAccountData) MsgIsZero() bool {
return ((*z).BaseVotingData.VoteID.MsgIsZero()) && ((*z).BaseVotingData.SelectionID.MsgIsZero()) && ((*z).BaseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).BaseVotingData.VoteLastValid.MsgIsZero()) && ((*z).BaseVotingData.VoteKeyDilution == 0) && ((*z).BaseVotingData.StateProofID.MsgIsZero()) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func BaseOnlineAccountDataMaxSize() (s int) {
+ s = 1 + 2 + crypto.OneTimeSignatureVerifierMaxSize() + 2 + crypto.VRFVerifierMaxSize() + 2 + basics.RoundMaxSize() + 2 + basics.RoundMaxSize() + 2 + msgp.Uint64Size + 2 + merklesignature.CommitmentMaxSize() + 2 + basics.MicroAlgosMaxSize() + 2 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *BaseVotingData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1130,6 +1154,12 @@ func (z *BaseVotingData) MsgIsZero() bool {
return ((*z).VoteID.MsgIsZero()) && ((*z).SelectionID.MsgIsZero()) && ((*z).VoteFirstValid.MsgIsZero()) && ((*z).VoteLastValid.MsgIsZero()) && ((*z).VoteKeyDilution == 0) && ((*z).StateProofID.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func BaseVotingDataMaxSize() (s int) {
+ s = 1 + 2 + crypto.OneTimeSignatureVerifierMaxSize() + 2 + crypto.VRFVerifierMaxSize() + 2 + basics.RoundMaxSize() + 2 + basics.RoundMaxSize() + 2 + msgp.Uint64Size + 2 + merklesignature.CommitmentMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *CatchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1374,6 +1404,12 @@ func (z *CatchpointFirstStageInfo) MsgIsZero() bool {
return ((*z).Totals.MsgIsZero()) && ((*z).TrieBalancesHash.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalKVs == 0) && ((*z).TotalChunks == 0) && ((*z).BiggestChunkLen == 0) && ((*z).StateProofVerificationHash.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func CatchpointFirstStageInfoMaxSize() (s int) {
+ s = 1 + 14 + ledgercore.AccountTotalsMaxSize() + 17 + crypto.DigestMaxSize() + 14 + msgp.Uint64Size + 9 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 19 + crypto.DigestMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z ResourceFlags) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1420,6 +1456,12 @@ func (z ResourceFlags) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func ResourceFlagsMaxSize() (s int) {
+ s = msgp.Uint8Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *ResourcesData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2149,6 +2191,21 @@ func (z *ResourcesData) MsgIsZero() bool {
return ((*z).Total == 0) && ((*z).Decimals == 0) && ((*z).DefaultFrozen == false) && ((*z).UnitName == "") && ((*z).AssetName == "") && ((*z).URL == "") && ((*z).MetadataHash == ([32]byte{})) && ((*z).Manager.MsgIsZero()) && ((*z).Reserve.MsgIsZero()) && ((*z).Freeze.MsgIsZero()) && ((*z).Clawback.MsgIsZero()) && ((*z).Amount == 0) && ((*z).Frozen == false) && ((*z).SchemaNumUint == 0) && ((*z).SchemaNumByteSlice == 0) && ((*z).KeyValue.MsgIsZero()) && (len((*z).ApprovalProgram) == 0) && (len((*z).ClearStateProgram) == 0) && ((*z).GlobalState.MsgIsZero()) && ((*z).LocalStateSchemaNumUint == 0) && ((*z).LocalStateSchemaNumByteSlice == 0) && ((*z).GlobalStateSchemaNumUint == 0) && ((*z).GlobalStateSchemaNumByteSlice == 0) && ((*z).ExtraProgramPages == 0) && ((*z).ResourceFlags == 0) && ((*z).UpdateRound == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func ResourcesDataMaxSize() (s int) {
+ s = 3 + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.BoolSize + 2
+ panic("Unable to determine max size: String type z.UnitName is unbounded")
+ s += 2
+ panic("Unable to determine max size: String type z.AssetName is unbounded")
+ s += 2
+ panic("Unable to determine max size: String type z.URL is unbounded")
+ s += 2
+ // Calculating size of array: z.MetadataHash
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + basics.AddressMaxSize() + 2 + msgp.Uint64Size + 2 + msgp.BoolSize + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + basics.TealKeyValueMaxSize() + 2 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 2 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 2 + basics.TealKeyValueMaxSize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + 2 + msgp.Uint32Size + 2 + msgp.Uint8Size + 2 + msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *TxTailRound) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2453,6 +2510,21 @@ func (z *TxTailRound) MsgIsZero() bool {
return (len((*z).TxnIDs) == 0) && (len((*z).LastValid) == 0) && (len((*z).Leases) == 0) && ((*z).Hdr.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func TxTailRoundMaxSize() (s int) {
+ s = 1 + 2
+ // Calculating size of slice: z.TxnIDs
+ panic("Slice z.TxnIDs is unbounded")
+ s += 2
+ // Calculating size of slice: z.LastValid
+ panic("Slice z.LastValid is unbounded")
+ s += 2
+ // Calculating size of slice: z.Leases
+ panic("Slice z.Leases is unbounded")
+ s += 2 + bookkeeping.BlockHeaderMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *TxTailRoundLease) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -2604,3 +2676,12 @@ func (z *TxTailRoundLease) Msgsize() (s int) {
func (z *TxTailRoundLease) MsgIsZero() bool {
return ((*z).Sender.MsgIsZero()) && ((*z).Lease == ([32]byte{})) && ((*z).TxnIdx == 0)
}
+
+// MaxSize returns a maximum valid message size for this message type
+func TxTailRoundLeaseMaxSize() (s int) {
+ s = 1 + 2 + basics.AddressMaxSize() + 2
+ // Calculating size of array: z.Lease
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 7 + msgp.Uint64Size
+ return
+}
diff --git a/ledger/store/trackerdb/sqlitedriver/accountsV2.go b/ledger/store/trackerdb/sqlitedriver/accountsV2.go
index d6e733090..ec48265fa 100644
--- a/ledger/store/trackerdb/sqlitedriver/accountsV2.go
+++ b/ledger/store/trackerdb/sqlitedriver/accountsV2.go
@@ -56,20 +56,25 @@ func NewAccountsSQLReaderWriter(e db.Executable) *accountsV2ReaderWriter {
}
}
+// NewAccountsSQLReader creates an SQL reader
+func NewAccountsSQLReader(q db.Queryable) *accountsV2Reader {
+ return &accountsV2Reader{q: q, preparedStatements: make(map[string]*sql.Stmt)}
+}
+
// Testing returns this reader, exposed as an interface with test functions
-func (r *accountsV2Reader) Testing() trackerdb.TestAccountsReaderExt {
+func (r *accountsV2Reader) Testing() trackerdb.AccountsReaderTestExt {
return r
}
-func (r *accountsV2Reader) getOrPrepare(queryString string) (stmt *sql.Stmt, err error) {
+func (r *accountsV2Reader) getOrPrepare(queryString string) (*sql.Stmt, error) {
// fetch statement (use the query as the key)
if stmt, ok := r.preparedStatements[queryString]; ok {
return stmt, nil
}
// we do not have it, prepare it
- stmt, err = r.q.Prepare(queryString)
+ stmt, err := r.q.Prepare(queryString)
if err != nil {
- return
+ return nil, err
}
// cache the statement
r.preparedStatements[queryString] = stmt
@@ -433,21 +438,6 @@ func (r *accountsV2Reader) LookupAccountAddressFromAddressID(ctx context.Context
return
}
-func (r *accountsV2Reader) LookupAccountDataByAddress(addr basics.Address) (ref trackerdb.AccountRef, data []byte, err error) {
- // optimize this query for repeated usage
- selectStmt, err := r.getOrPrepare("SELECT rowid, data FROM accountbase WHERE address=?")
- if err != nil {
- return
- }
-
- var rowid int64
- err = selectStmt.QueryRow(addr[:]).Scan(&rowid, &data)
- if err != nil {
- return
- }
- return sqlRowRef{rowid}, data, err
-}
-
// LookupOnlineAccountDataByAddress looks up online account data by address.
func (r *accountsV2Reader) LookupOnlineAccountDataByAddress(addr basics.Address) (ref trackerdb.OnlineAccountRef, data []byte, err error) {
// optimize this query for repeated usage
@@ -458,7 +448,10 @@ func (r *accountsV2Reader) LookupOnlineAccountDataByAddress(addr basics.Address)
var rowid int64
err = selectStmt.QueryRow(addr[:]).Scan(&rowid, &data)
- if err != nil {
+ if err == sql.ErrNoRows {
+ err = trackerdb.ErrNotFound
+ return
+ } else if err != nil {
return
}
return sqlRowRef{rowid}, data, err
@@ -474,7 +467,10 @@ func (r *accountsV2Reader) LookupAccountRowID(addr basics.Address) (ref trackerd
var rowid int64
err = addrRowidStmt.QueryRow(addr[:]).Scan(&rowid)
- if err != nil {
+ if err == sql.ErrNoRows {
+ err = trackerdb.ErrNotFound
+ return
+ } else if err != nil {
return
}
return sqlRowRef{rowid}, err
@@ -483,7 +479,7 @@ func (r *accountsV2Reader) LookupAccountRowID(addr basics.Address) (ref trackerd
// LookupResourceDataByAddrID looks up the resource data by account rowid + resource aidx.
func (r *accountsV2Reader) LookupResourceDataByAddrID(accountRef trackerdb.AccountRef, aidx basics.CreatableIndex) (data []byte, err error) {
if accountRef == nil {
- return data, sql.ErrNoRows
+ return data, trackerdb.ErrNotFound
}
addrid := accountRef.(sqlRowRef).rowid
// optimize this query for repeated usage
@@ -493,7 +489,10 @@ func (r *accountsV2Reader) LookupResourceDataByAddrID(accountRef trackerdb.Accou
}
err = selectStmt.QueryRow(addrid, aidx).Scan(&data)
- if err != nil {
+ if err == sql.ErrNoRows {
+ err = trackerdb.ErrNotFound
+ return
+ } else if err != nil {
return
}
return data, err
diff --git a/ledger/store/trackerdb/sqlitedriver/catchpoint.go b/ledger/store/trackerdb/sqlitedriver/catchpoint.go
index 388749858..ef63b7a7f 100644
--- a/ledger/store/trackerdb/sqlitedriver/catchpoint.go
+++ b/ledger/store/trackerdb/sqlitedriver/catchpoint.go
@@ -52,6 +52,10 @@ func NewCatchpointSQLReaderWriter(e db.Executable) *catchpointReaderWriter {
}
}
+func makeCatchpointReader(e db.Queryable) trackerdb.CatchpointReader {
+ return &catchpointReader{q: e}
+}
+
func (cr *catchpointReader) GetCatchpoint(ctx context.Context, round basics.Round) (fileName string, catchpoint string, fileSize int64, err error) {
err = cr.q.QueryRowContext(ctx, "SELECT filename, catchpoint, filesize FROM storedcatchpoints WHERE round=?", int64(round)).Scan(&fileName, &catchpoint, &fileSize)
return
diff --git a/ledger/store/trackerdb/sqlitedriver/catchpointPendingHashesIter.go b/ledger/store/trackerdb/sqlitedriver/catchpointPendingHashesIter.go
index 32d2614e1..611c67c87 100644
--- a/ledger/store/trackerdb/sqlitedriver/catchpointPendingHashesIter.go
+++ b/ledger/store/trackerdb/sqlitedriver/catchpointPendingHashesIter.go
@@ -19,27 +19,29 @@ package sqlitedriver
import (
"context"
"database/sql"
+
+ "github.com/algorand/go-algorand/util/db"
)
// catchpointPendingHashesIterator allows us to iterate over the hashes in the catchpointpendinghashes table in their order.
type catchpointPendingHashesIterator struct {
hashCount int
- tx *sql.Tx
+ q db.Queryable
rows *sql.Rows
}
// MakeCatchpointPendingHashesIterator create a pending hashes iterator that retrieves the hashes in the catchpointpendinghashes table.
-func MakeCatchpointPendingHashesIterator(hashCount int, tx *sql.Tx) *catchpointPendingHashesIterator {
+func MakeCatchpointPendingHashesIterator(hashCount int, q db.Queryable) *catchpointPendingHashesIterator {
return &catchpointPendingHashesIterator{
hashCount: hashCount,
- tx: tx,
+ q: q,
}
}
// Next returns an array containing the hashes, returning HashCount hashes at a time.
func (iterator *catchpointPendingHashesIterator) Next(ctx context.Context) (hashes [][]byte, err error) {
if iterator.rows == nil {
- iterator.rows, err = iterator.tx.QueryContext(ctx, "SELECT data FROM catchpointpendinghashes ORDER BY data")
+ iterator.rows, err = iterator.q.QueryContext(ctx, "SELECT data FROM catchpointpendinghashes ORDER BY data")
if err != nil {
return
}
diff --git a/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go b/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go
index f48a4f82c..6dc3b6722 100644
--- a/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go
+++ b/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go
@@ -23,12 +23,13 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger/encoded"
"github.com/algorand/go-algorand/ledger/store/trackerdb"
+ "github.com/algorand/go-algorand/util/db"
"github.com/algorand/msgp/msgp"
)
// encodedAccountsBatchIter allows us to iterate over the accounts data stored in the accountbase table.
type encodedAccountsBatchIter struct {
- tx *sql.Tx
+ q db.Queryable
accountsRows *sql.Rows
resourcesRows *sql.Rows
nextBaseRow pendingBaseRow
@@ -45,21 +46,21 @@ type catchpointAccountResourceCounter struct {
}
// MakeEncodedAccoutsBatchIter creates an empty accounts batch iterator.
-func MakeEncodedAccoutsBatchIter(tx *sql.Tx) *encodedAccountsBatchIter {
- return &encodedAccountsBatchIter{tx: tx}
+func MakeEncodedAccoutsBatchIter(q db.Queryable) *encodedAccountsBatchIter {
+ return &encodedAccountsBatchIter{q: q}
}
// Next returns an array containing the account data, in the same way it appear in the database
// returning accountCount accounts data at a time.
func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, accountCount int, resourceCount int) (bals []encoded.BalanceRecordV6, numAccountsProcessed uint64, err error) {
if iterator.accountsRows == nil {
- iterator.accountsRows, err = iterator.tx.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid")
+ iterator.accountsRows, err = iterator.q.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid")
if err != nil {
return
}
}
if iterator.resourcesRows == nil {
- iterator.resourcesRows, err = iterator.tx.QueryContext(ctx, "SELECT addrid, aidx, data FROM resources ORDER BY addrid, aidx")
+ iterator.resourcesRows, err = iterator.q.QueryContext(ctx, "SELECT addrid, aidx, data FROM resources ORDER BY addrid, aidx")
if err != nil {
return
}
diff --git a/ledger/store/trackerdb/sqlitedriver/kvsIter.go b/ledger/store/trackerdb/sqlitedriver/kvsIter.go
index 9cd1c9a7a..3beb6ae0d 100644
--- a/ledger/store/trackerdb/sqlitedriver/kvsIter.go
+++ b/ledger/store/trackerdb/sqlitedriver/kvsIter.go
@@ -19,22 +19,24 @@ package sqlitedriver
import (
"context"
"database/sql"
+
+ "github.com/algorand/go-algorand/util/db"
)
type kvsIter struct {
- tx *sql.Tx
+ q db.Queryable
rows *sql.Rows
}
// MakeKVsIter creates a KV iterator.
-func MakeKVsIter(ctx context.Context, tx *sql.Tx) (*kvsIter, error) {
- rows, err := tx.QueryContext(ctx, "SELECT key, value FROM kvstore")
+func MakeKVsIter(ctx context.Context, q db.Queryable) (*kvsIter, error) {
+ rows, err := q.QueryContext(ctx, "SELECT key, value FROM kvstore")
if err != nil {
return nil, err
}
return &kvsIter{
- tx: tx,
+ q: q,
rows: rows,
}, nil
}
diff --git a/ledger/store/trackerdb/sqlitedriver/merkle_commiter.go b/ledger/store/trackerdb/sqlitedriver/merkle_commiter.go
index 41589345f..052dfdac9 100644
--- a/ledger/store/trackerdb/sqlitedriver/merkle_commiter.go
+++ b/ledger/store/trackerdb/sqlitedriver/merkle_commiter.go
@@ -16,11 +16,15 @@
package sqlitedriver
-import "database/sql"
+import (
+ "database/sql"
+
+ "github.com/algorand/go-algorand/util/db"
+)
//msgp:ignore MerkleCommitter
type merkleCommitter struct {
- tx *sql.Tx
+ e db.Executable
deleteStmt *sql.Stmt
insertStmt *sql.Stmt
selectStmt *sql.Stmt
@@ -28,21 +32,21 @@ type merkleCommitter struct {
// MakeMerkleCommitter creates a MerkleCommitter object that implements the merkletrie.Committer interface allowing storing and loading
// merkletrie pages from a sqlite database.
-func MakeMerkleCommitter(tx *sql.Tx, staging bool) (mc *merkleCommitter, err error) {
- mc = &merkleCommitter{tx: tx}
+func MakeMerkleCommitter(e db.Executable, staging bool) (mc *merkleCommitter, err error) {
+ mc = &merkleCommitter{e: e}
accountHashesTable := "accounthashes"
if staging {
accountHashesTable = "catchpointaccounthashes"
}
- mc.deleteStmt, err = tx.Prepare("DELETE FROM " + accountHashesTable + " WHERE id=?")
+ mc.deleteStmt, err = e.Prepare("DELETE FROM " + accountHashesTable + " WHERE id=?")
if err != nil {
return nil, err
}
- mc.insertStmt, err = tx.Prepare("INSERT OR REPLACE INTO " + accountHashesTable + "(id, data) VALUES(?, ?)")
+ mc.insertStmt, err = e.Prepare("INSERT OR REPLACE INTO " + accountHashesTable + "(id, data) VALUES(?, ?)")
if err != nil {
return nil, err
}
- mc.selectStmt, err = tx.Prepare("SELECT data FROM " + accountHashesTable + " WHERE id = ?")
+ mc.selectStmt, err = e.Prepare("SELECT data FROM " + accountHashesTable + " WHERE id = ?")
if err != nil {
return nil, err
}
diff --git a/ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go b/ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go
index 2e2c40dbb..275200113 100644
--- a/ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go
+++ b/ledger/store/trackerdb/sqlitedriver/orderedAccountsIter.go
@@ -26,6 +26,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger/store/trackerdb"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/db"
)
// orderedAccountsIter allows us to iterate over the accounts addresses in the order of the account hashes.
@@ -34,7 +35,7 @@ type orderedAccountsIter struct {
accountBaseRows *sql.Rows
hashesRows *sql.Rows
resourcesRows *sql.Rows
- tx *sql.Tx
+ e db.Executable
pendingBaseRow pendingBaseRow
pendingResourceRow pendingResourceRow
accountCount int
@@ -84,9 +85,9 @@ type pendingResourceRow struct {
// MakeOrderedAccountsIter creates an ordered account iterator. Note that due to implementation reasons,
// only a single iterator can be active at a time.
-func MakeOrderedAccountsIter(tx *sql.Tx, accountCount int) *orderedAccountsIter {
+func MakeOrderedAccountsIter(e db.Executable, accountCount int) *orderedAccountsIter {
return &orderedAccountsIter{
- tx: tx,
+ e: e,
accountCount: accountCount,
step: oaiStepStartup,
}
@@ -104,7 +105,7 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []trackerdb
if iterator.step == oaiStepDeleteOldOrderingTable {
// although we're going to delete this table anyway when completing the iterator execution, we'll try to
// clean up any intermediate table.
- _, err = iterator.tx.ExecContext(ctx, "DROP TABLE IF EXISTS accountsiteratorhashes")
+ _, err = iterator.e.ExecContext(ctx, "DROP TABLE IF EXISTS accountsiteratorhashes")
if err != nil {
return
}
@@ -113,7 +114,7 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []trackerdb
}
if iterator.step == oaiStepCreateOrderingTable {
// create the temporary table
- _, err = iterator.tx.ExecContext(ctx, "CREATE TABLE accountsiteratorhashes(addrid INTEGER, hash blob)")
+ _, err = iterator.e.ExecContext(ctx, "CREATE TABLE accountsiteratorhashes(addrid INTEGER, hash blob)")
if err != nil {
return
}
@@ -122,17 +123,17 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []trackerdb
}
if iterator.step == oaiStepQueryAccounts {
// iterate over the existing accounts
- iterator.accountBaseRows, err = iterator.tx.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid")
+ iterator.accountBaseRows, err = iterator.e.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid")
if err != nil {
return
}
// iterate over the existing resources
- iterator.resourcesRows, err = iterator.tx.QueryContext(ctx, "SELECT addrid, aidx, data FROM resources ORDER BY addrid, aidx")
+ iterator.resourcesRows, err = iterator.e.QueryContext(ctx, "SELECT addrid, aidx, data FROM resources ORDER BY addrid, aidx")
if err != nil {
return
}
// prepare the insert statement into the temporary table
- iterator.insertStmt, err = iterator.tx.PrepareContext(ctx, "INSERT INTO accountsiteratorhashes(addrid, hash) VALUES(?, ?)")
+ iterator.insertStmt, err = iterator.e.PrepareContext(ctx, "INSERT INTO accountsiteratorhashes(addrid, hash) VALUES(?, ?)")
if err != nil {
return
}
@@ -153,12 +154,12 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []trackerdb
resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *trackerdb.ResourcesData, encodedResourceData []byte, lastResource bool) error {
if resData != nil {
- hash, err := trackerdb.ResourcesHashBuilderV6(resData, addr, cidx, resData.UpdateRound, encodedResourceData)
- if err != nil {
- return err
+ hash, err2 := trackerdb.ResourcesHashBuilderV6(resData, addr, cidx, resData.UpdateRound, encodedResourceData)
+ if err2 != nil {
+ return err2
}
- _, err = iterator.insertStmt.ExecContext(ctx, lastAddrID, hash)
- return err
+ _, err2 = iterator.insertStmt.ExecContext(ctx, lastAddrID, hash)
+ return err2
}
return nil
}
@@ -200,7 +201,7 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []trackerdb
if iterator.step == oaiStepCreateOrderingAccountIndex {
// create an index. It was shown that even when we're making a single select statement in step 5, it would be better to have this index vs. not having it at all.
// note that this index is using the rowid of the accountsiteratorhashes table.
- _, err = iterator.tx.ExecContext(ctx, "CREATE INDEX accountsiteratorhashesidx ON accountsiteratorhashes(hash)")
+ _, err = iterator.e.ExecContext(ctx, "CREATE INDEX accountsiteratorhashesidx ON accountsiteratorhashes(hash)")
if err != nil {
iterator.Close(ctx)
return
@@ -210,7 +211,7 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []trackerdb
}
if iterator.step == oaiStepSelectFromOrderedTable {
// select the data from the ordered table
- iterator.hashesRows, err = iterator.tx.QueryContext(ctx, "SELECT addrid, hash FROM accountsiteratorhashes ORDER BY hash")
+ iterator.hashesRows, err = iterator.e.QueryContext(ctx, "SELECT addrid, hash FROM accountsiteratorhashes ORDER BY hash")
if err != nil {
iterator.Close(ctx)
@@ -272,7 +273,7 @@ func (iterator *orderedAccountsIter) Close(ctx context.Context) (err error) {
iterator.insertStmt.Close()
iterator.insertStmt = nil
}
- _, err = iterator.tx.ExecContext(ctx, "DROP TABLE IF EXISTS accountsiteratorhashes")
+ _, err = iterator.e.ExecContext(ctx, "DROP TABLE IF EXISTS accountsiteratorhashes")
return
}
@@ -418,7 +419,7 @@ func processAllResources(
count++
if resourceCount > 0 && count == resourceCount {
// last resource to be included in chunk
- err := callback(addr, aidx, &resData, buf, true)
+ err = callback(addr, aidx, &resData, buf, true)
return pendingResourceRow{}, count, err
}
err = callback(addr, aidx, &resData, buf, false)
diff --git a/ledger/store/trackerdb/sqlitedriver/schema.go b/ledger/store/trackerdb/sqlitedriver/schema.go
index 47cb0180d..ea6f57786 100644
--- a/ledger/store/trackerdb/sqlitedriver/schema.go
+++ b/ledger/store/trackerdb/sqlitedriver/schema.go
@@ -181,9 +181,9 @@ var accountsResetExprs = []string{
//
// accountsInit returns nil if either it has initialized the database
// correctly, or if the database has already been initialized.
-func accountsInit(tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) {
+func accountsInit(e db.Executable, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) {
for _, tableCreate := range accountsSchema {
- _, err = tx.Exec(tableCreate)
+ _, err = e.Exec(tableCreate)
if err != nil {
return
}
@@ -191,11 +191,11 @@ func accountsInit(tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData
// Run creatables migration if it hasn't run yet
var creatableMigrated bool
- err = tx.QueryRow("SELECT 1 FROM pragma_table_info('assetcreators') WHERE name='ctype'").Scan(&creatableMigrated)
+ err = e.QueryRow("SELECT 1 FROM pragma_table_info('assetcreators') WHERE name='ctype'").Scan(&creatableMigrated)
if err == sql.ErrNoRows {
// Run migration
for _, migrateCmd := range creatablesMigration {
- _, err = tx.Exec(migrateCmd)
+ _, err = e.Exec(migrateCmd)
if err != nil {
return
}
@@ -204,13 +204,13 @@ func accountsInit(tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData
return
}
- _, err = tx.Exec("INSERT INTO acctrounds (id, rnd) VALUES ('acctbase', 0)")
+ _, err = e.Exec("INSERT INTO acctrounds (id, rnd) VALUES ('acctbase', 0)")
if err == nil {
var ot basics.OverflowTracker
var totals ledgercore.AccountTotals
for addr, data := range initAccounts {
- _, err = tx.Exec("INSERT INTO accountbase (address, data) VALUES (?, ?)",
+ _, err = e.Exec("INSERT INTO accountbase (address, data) VALUES (?, ?)",
addr[:], protocol.Encode(&data)) //nolint:gosec // Encode does not hold on to reference
if err != nil {
return true, err
@@ -224,7 +224,7 @@ func accountsInit(tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData
return true, fmt.Errorf("overflow computing totals")
}
- arw := NewAccountsSQLReaderWriter(tx)
+ arw := NewAccountsSQLReaderWriter(e)
err = arw.AccountsPutTotals(totals, false)
if err != nil {
return true, err
@@ -245,9 +245,9 @@ func accountsInit(tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData
// accountsAddNormalizedBalance adds the normalizedonlinebalance column
// to the accountbase table.
-func accountsAddNormalizedBalance(tx *sql.Tx, proto config.ConsensusParams) error {
+func accountsAddNormalizedBalance(e db.Executable, proto config.ConsensusParams) error {
var exists bool
- err := tx.QueryRow("SELECT 1 FROM pragma_table_info('accountbase') WHERE name='normalizedonlinebalance'").Scan(&exists)
+ err := e.QueryRow("SELECT 1 FROM pragma_table_info('accountbase') WHERE name='normalizedonlinebalance'").Scan(&exists)
if err == nil {
// Already exists.
return nil
@@ -257,13 +257,13 @@ func accountsAddNormalizedBalance(tx *sql.Tx, proto config.ConsensusParams) erro
}
for _, stmt := range createOnlineAccountIndex {
- _, err := tx.Exec(stmt)
+ _, err = e.Exec(stmt)
if err != nil {
return err
}
}
- rows, err := tx.Query("SELECT address, data FROM accountbase")
+ rows, err := e.Query("SELECT address, data FROM accountbase")
if err != nil {
return err
}
@@ -285,7 +285,7 @@ func accountsAddNormalizedBalance(tx *sql.Tx, proto config.ConsensusParams) erro
normBalance := data.NormalizedOnlineBalance(proto)
if normBalance > 0 {
- _, err = tx.Exec("UPDATE accountbase SET normalizedonlinebalance=? WHERE address=?", normBalance, addrbuf)
+ _, err = e.Exec("UPDATE accountbase SET normalizedonlinebalance=? WHERE address=?", normBalance, addrbuf)
if err != nil {
return err
}
@@ -296,9 +296,9 @@ func accountsAddNormalizedBalance(tx *sql.Tx, proto config.ConsensusParams) erro
}
// accountsCreateResourceTable creates the resource table in the database.
-func accountsCreateResourceTable(ctx context.Context, tx *sql.Tx) error {
+func accountsCreateResourceTable(ctx context.Context, e db.Executable) error {
var exists bool
- err := tx.QueryRowContext(ctx, "SELECT 1 FROM pragma_table_info('resources') WHERE name='addrid'").Scan(&exists)
+ err := e.QueryRowContext(ctx, "SELECT 1 FROM pragma_table_info('resources') WHERE name='addrid'").Scan(&exists)
if err == nil {
// Already exists.
return nil
@@ -307,7 +307,7 @@ func accountsCreateResourceTable(ctx context.Context, tx *sql.Tx) error {
return err
}
for _, stmt := range createResourcesTable {
- _, err = tx.ExecContext(ctx, stmt)
+ _, err = e.ExecContext(ctx, stmt)
if err != nil {
return err
}
@@ -315,9 +315,9 @@ func accountsCreateResourceTable(ctx context.Context, tx *sql.Tx) error {
return nil
}
-func accountsCreateOnlineAccountsTable(ctx context.Context, tx *sql.Tx) error {
+func accountsCreateOnlineAccountsTable(ctx context.Context, e db.Executable) error {
var exists bool
- err := tx.QueryRowContext(ctx, "SELECT 1 FROM pragma_table_info('onlineaccounts') WHERE name='address'").Scan(&exists)
+ err := e.QueryRowContext(ctx, "SELECT 1 FROM pragma_table_info('onlineaccounts') WHERE name='address'").Scan(&exists)
if err == nil {
// Already exists.
return nil
@@ -326,7 +326,7 @@ func accountsCreateOnlineAccountsTable(ctx context.Context, tx *sql.Tx) error {
return err
}
for _, stmt := range createOnlineAccountsTable {
- _, err = tx.ExecContext(ctx, stmt)
+ _, err = e.ExecContext(ctx, stmt)
if err != nil {
return err
}
@@ -335,9 +335,9 @@ func accountsCreateOnlineAccountsTable(ctx context.Context, tx *sql.Tx) error {
}
// accountsCreateBoxTable creates the KVStore table for box-storage in the database.
-func accountsCreateBoxTable(ctx context.Context, tx *sql.Tx) error {
+func accountsCreateBoxTable(ctx context.Context, e db.Executable) error {
var exists bool
- err := tx.QueryRow("SELECT 1 FROM pragma_table_info('kvstore') WHERE name='key'").Scan(&exists)
+ err := e.QueryRow("SELECT 1 FROM pragma_table_info('kvstore') WHERE name='key'").Scan(&exists)
if err == nil {
// already exists
return nil
@@ -346,7 +346,7 @@ func accountsCreateBoxTable(ctx context.Context, tx *sql.Tx) error {
return err
}
for _, stmt := range createBoxTable {
- _, err = tx.ExecContext(ctx, stmt)
+ _, err = e.ExecContext(ctx, stmt)
if err != nil {
return err
}
@@ -355,14 +355,14 @@ func accountsCreateBoxTable(ctx context.Context, tx *sql.Tx) error {
}
// performKVStoreNullBlobConversion scans keys with null blob value, and convert the value to `[]byte{}`.
-func performKVStoreNullBlobConversion(ctx context.Context, tx *sql.Tx) error {
- _, err := tx.ExecContext(ctx, "UPDATE kvstore SET value = '' WHERE value is NULL")
+func performKVStoreNullBlobConversion(ctx context.Context, e db.Executable) error {
+ _, err := e.ExecContext(ctx, "UPDATE kvstore SET value = '' WHERE value is NULL")
return err
}
-func accountsCreateTxTailTable(ctx context.Context, tx *sql.Tx) (err error) {
+func accountsCreateTxTailTable(ctx context.Context, e db.Executable) (err error) {
for _, stmt := range createTxTailTable {
- _, err = tx.ExecContext(ctx, stmt)
+ _, err = e.ExecContext(ctx, stmt)
if err != nil {
return
}
@@ -370,9 +370,9 @@ func accountsCreateTxTailTable(ctx context.Context, tx *sql.Tx) (err error) {
return nil
}
-func accountsCreateOnlineRoundParamsTable(ctx context.Context, tx *sql.Tx) (err error) {
+func accountsCreateOnlineRoundParamsTable(ctx context.Context, e db.Executable) (err error) {
for _, stmt := range createOnlineRoundParamsTable {
- _, err = tx.ExecContext(ctx, stmt)
+ _, err = e.ExecContext(ctx, stmt)
if err != nil {
return
}
@@ -396,7 +396,7 @@ func createStateProofVerificationTable(ctx context.Context, e db.Executable) err
}
// performResourceTableMigration migrate the database to use the resources table.
-func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(processed, total uint64)) (err error) {
+func performResourceTableMigration(ctx context.Context, e db.Executable, log func(processed, total uint64)) (err error) {
now := time.Now().UnixNano()
idxnameBalances := fmt.Sprintf("onlineaccountbals_idx_%d", now)
idxnameAddress := fmt.Sprintf("accountbase_address_idx_%d", now)
@@ -418,7 +418,7 @@ func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(pro
}
for _, stmt := range createNewAcctBase {
- _, err = tx.ExecContext(ctx, stmt)
+ _, err = e.ExecContext(ctx, stmt)
if err != nil {
return err
}
@@ -426,26 +426,26 @@ func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(pro
var insertNewAcctBase *sql.Stmt
var insertResources *sql.Stmt
var insertNewAcctBaseNormBal *sql.Stmt
- insertNewAcctBase, err = tx.PrepareContext(ctx, "INSERT INTO accountbase_resources_migration(address, data) VALUES(?, ?)")
+ insertNewAcctBase, err = e.PrepareContext(ctx, "INSERT INTO accountbase_resources_migration(address, data) VALUES(?, ?)")
if err != nil {
return err
}
defer insertNewAcctBase.Close()
- insertNewAcctBaseNormBal, err = tx.PrepareContext(ctx, "INSERT INTO accountbase_resources_migration(address, data, normalizedonlinebalance) VALUES(?, ?, ?)")
+ insertNewAcctBaseNormBal, err = e.PrepareContext(ctx, "INSERT INTO accountbase_resources_migration(address, data, normalizedonlinebalance) VALUES(?, ?, ?)")
if err != nil {
return err
}
defer insertNewAcctBaseNormBal.Close()
- insertResources, err = tx.PrepareContext(ctx, "INSERT INTO resources(addrid, aidx, data) VALUES(?, ?, ?)")
+ insertResources, err = e.PrepareContext(ctx, "INSERT INTO resources(addrid, aidx, data) VALUES(?, ?, ?)")
if err != nil {
return err
}
defer insertResources.Close()
var rows *sql.Rows
- rows, err = tx.QueryContext(ctx, "SELECT address, data, normalizedonlinebalance FROM accountbase ORDER BY address")
+ rows, err = e.QueryContext(ctx, "SELECT address, data, normalizedonlinebalance FROM accountbase ORDER BY address")
if err != nil {
return err
}
@@ -457,7 +457,7 @@ func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(pro
var processedAccounts uint64
var totalBaseAccounts uint64
- arw := NewAccountsSQLReaderWriter(tx)
+ arw := NewAccountsSQLReaderWriter(e)
totalBaseAccounts, err = arw.TotalAccounts(ctx)
if err != nil {
return err
@@ -501,12 +501,12 @@ func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(pro
return err
}
insertResourceCallback := func(ctx context.Context, rowID int64, cidx basics.CreatableIndex, rd *trackerdb.ResourcesData) error {
- var err error
+ var err0 error
if rd != nil {
encodedData := protocol.Encode(rd)
- _, err = insertResources.ExecContext(ctx, rowID, cidx, encodedData)
+ _, err0 = insertResources.ExecContext(ctx, rowID, cidx, encodedData)
}
- return err
+ return err0
}
err = trackerdb.AccountDataResources(ctx, &accountData, rowID, insertResourceCallback)
if err != nil {
@@ -524,7 +524,7 @@ func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(pro
}
for _, stmt := range applyNewAcctBase {
- _, err = tx.Exec(stmt)
+ _, err = e.Exec(stmt)
if err != nil {
return err
}
@@ -532,12 +532,12 @@ func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(pro
return nil
}
-func performTxTailTableMigration(ctx context.Context, tx *sql.Tx, blockDb db.Accessor) (err error) {
- if tx == nil {
+func performTxTailTableMigration(ctx context.Context, e db.Executable, blockDb db.Accessor) (err error) {
+ if e == nil {
return nil
}
- arw := NewAccountsSQLReaderWriter(tx)
+ arw := NewAccountsSQLReaderWriter(e)
dbRound, err := arw.AccountsRound()
if err != nil {
return fmt.Errorf("latest block number cannot be retrieved : %w", err)
@@ -547,13 +547,13 @@ func performTxTailTableMigration(ctx context.Context, tx *sql.Tx, blockDb db.Acc
// when migrating there are only MaxTxnLife blocks in the block DB
// since the original txTail.committedUpTo preserved only (rnd+1)-MaxTxnLife = 1000 blocks back
err = blockDb.Atomic(func(ctx context.Context, blockTx *sql.Tx) error {
- latestBlockRound, err := blockdb.BlockLatest(blockTx)
- if err != nil {
- return fmt.Errorf("latest block number cannot be retrieved : %w", err)
+ latestBlockRound, blockErr := blockdb.BlockLatest(blockTx)
+ if blockErr != nil {
+ return fmt.Errorf("latest block number cannot be retrieved : %w", blockErr)
}
- latestHdr, err := blockdb.BlockGetHdr(blockTx, dbRound)
- if err != nil {
- return fmt.Errorf("latest block header %d cannot be retrieved : %w", dbRound, err)
+ latestHdr, hdrErr := blockdb.BlockGetHdr(blockTx, dbRound)
+ if hdrErr != nil {
+ return fmt.Errorf("latest block header %d cannot be retrieved : %w", dbRound, hdrErr)
}
proto := config.Consensus[latestHdr.CurrentProtocol]
@@ -567,7 +567,7 @@ func performTxTailTableMigration(ctx context.Context, tx *sql.Tx, blockDb db.Acc
if firstRound == basics.Round(0) {
firstRound++
}
- if _, err := blockdb.BlockGet(blockTx, firstRound); err != nil {
+ if _, getErr := blockdb.BlockGet(blockTx, firstRound); getErr != nil {
// looks like not catchpoint but a regular migration, start from maxTxnLife + deeperBlockHistory back
firstRound = (latestBlockRound + 1).SubSaturate(maxTxnLife + deeperBlockHistory)
if firstRound == basics.Round(0) {
@@ -576,14 +576,14 @@ func performTxTailTableMigration(ctx context.Context, tx *sql.Tx, blockDb db.Acc
}
tailRounds := make([][]byte, 0, maxTxnLife)
for rnd := firstRound; rnd <= dbRound; rnd++ {
- blk, err := blockdb.BlockGet(blockTx, rnd)
- if err != nil {
- return fmt.Errorf("block for round %d ( %d - %d ) cannot be retrieved : %w", rnd, firstRound, dbRound, err)
+ blk, getErr := blockdb.BlockGet(blockTx, rnd)
+ if getErr != nil {
+ return fmt.Errorf("block for round %d ( %d - %d ) cannot be retrieved : %w", rnd, firstRound, dbRound, getErr)
}
- tail, err := trackerdb.TxTailRoundFromBlock(blk)
- if err != nil {
- return err
+ tail, tErr := trackerdb.TxTailRoundFromBlock(blk)
+ if tErr != nil {
+ return tErr
}
encodedTail, _ := tail.Encode()
@@ -596,8 +596,8 @@ func performTxTailTableMigration(ctx context.Context, tx *sql.Tx, blockDb db.Acc
return err
}
-func performOnlineRoundParamsTailMigration(ctx context.Context, tx *sql.Tx, blockDb db.Accessor, newDatabase bool, initProto protocol.ConsensusVersion) (err error) {
- arw := NewAccountsSQLReaderWriter(tx)
+func performOnlineRoundParamsTailMigration(ctx context.Context, e db.Executable, blockDb db.Accessor, newDatabase bool, initProto protocol.ConsensusVersion) (err error) {
+ arw := NewAccountsSQLReaderWriter(e)
totals, err := arw.AccountsTotals(ctx, false)
if err != nil {
return err
@@ -611,9 +611,9 @@ func performOnlineRoundParamsTailMigration(ctx context.Context, tx *sql.Tx, bloc
currentProto = initProto
} else {
err = blockDb.Atomic(func(ctx context.Context, blockTx *sql.Tx) error {
- hdr, err := blockdb.BlockGetHdr(blockTx, rnd)
- if err != nil {
- return err
+ hdr, hdrErr := blockdb.BlockGetHdr(blockTx, rnd)
+ if hdrErr != nil {
+ return hdrErr
}
currentProto = hdr.CurrentProtocol
return nil
@@ -632,24 +632,24 @@ func performOnlineRoundParamsTailMigration(ctx context.Context, tx *sql.Tx, bloc
return arw.AccountsPutOnlineRoundParams(onlineRoundParams, rnd)
}
-func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progress func(processed, total uint64), log logging.Logger) (err error) {
+func performOnlineAccountsTableMigration(ctx context.Context, e db.Executable, progress func(processed, total uint64), log logging.Logger) (err error) {
var insertOnlineAcct *sql.Stmt
- insertOnlineAcct, err = tx.PrepareContext(ctx, "INSERT INTO onlineaccounts(address, data, normalizedonlinebalance, updround, votelastvalid) VALUES(?, ?, ?, ?, ?)")
+ insertOnlineAcct, err = e.PrepareContext(ctx, "INSERT INTO onlineaccounts(address, data, normalizedonlinebalance, updround, votelastvalid) VALUES(?, ?, ?, ?, ?)")
if err != nil {
return err
}
defer insertOnlineAcct.Close()
var updateAcct *sql.Stmt
- updateAcct, err = tx.PrepareContext(ctx, "UPDATE accountbase SET data = ? WHERE addrid = ?")
+ updateAcct, err = e.PrepareContext(ctx, "UPDATE accountbase SET data = ? WHERE addrid = ?")
if err != nil {
return err
}
defer updateAcct.Close()
var rows *sql.Rows
- rows, err = tx.QueryContext(ctx, "SELECT addrid, address, data, normalizedonlinebalance FROM accountbase")
+ rows, err = e.QueryContext(ctx, "SELECT addrid, address, data, normalizedonlinebalance FROM accountbase")
if err != nil {
return err
}
@@ -661,10 +661,10 @@ func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progre
var processedAccounts uint64
var totalOnlineBaseAccounts uint64
- arw := NewAccountsSQLReaderWriter(tx)
+ arw := NewAccountsSQLReaderWriter(e)
totalOnlineBaseAccounts, err = arw.TotalAccounts(ctx)
var total uint64
- err = tx.QueryRowContext(ctx, "SELECT count(1) FROM accountbase").Scan(&total)
+ err = e.QueryRowContext(ctx, "SELECT count(1) FROM accountbase").Scan(&total)
if err != nil {
if err != sql.ErrNoRows {
return err
@@ -763,7 +763,7 @@ func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progre
// update accounthashes for the modified accounts
if len(acctRehash) > 0 {
var count uint64
- err := tx.QueryRow("SELECT count(1) FROM accounthashes").Scan(&count)
+ err := e.QueryRow("SELECT count(1) FROM accounthashes").Scan(&count)
if err != nil {
return err
}
@@ -772,7 +772,7 @@ func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progre
return nil
}
- mc, err := MakeMerkleCommitter(tx, false)
+ mc, err := MakeMerkleCommitter(e, false)
if err != nil {
return nil
}
@@ -783,18 +783,18 @@ func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progre
}
for addr, state := range acctRehash {
deleteHash := trackerdb.AccountHashBuilderV6(addr, &state.old, state.oldEnc)
- deleted, err := trie.Delete(deleteHash)
- if err != nil {
- return fmt.Errorf("performOnlineAccountsTableMigration failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, err)
+ deleted, delErr := trie.Delete(deleteHash)
+ if delErr != nil {
+ return fmt.Errorf("performOnlineAccountsTableMigration failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, delErr)
}
if !deleted && log != nil {
log.Warnf("performOnlineAccountsTableMigration failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(deleteHash), addr)
}
addHash := trackerdb.AccountHashBuilderV6(addr, &state.new, state.newEnc)
- added, err := trie.Add(addHash)
- if err != nil {
- return fmt.Errorf("performOnlineAccountsTableMigration attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, err)
+ added, addErr := trie.Add(addHash)
+ if addErr != nil {
+ return fmt.Errorf("performOnlineAccountsTableMigration attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, addErr)
}
if !added && log != nil {
log.Warnf("performOnlineAccountsTableMigration attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(addHash), addr)
@@ -811,11 +811,11 @@ func performOnlineAccountsTableMigration(ctx context.Context, tx *sql.Tx, progre
// removeEmptyAccountData removes empty AccountData msgp-encoded entries from accountbase table
// and optionally returns list of addresses that were eliminated
-func removeEmptyAccountData(tx *sql.Tx, queryAddresses bool) (num int64, addresses []basics.Address, err error) {
+func removeEmptyAccountData(tx db.Executable, queryAddresses bool) (num int64, addresses []basics.Address, err error) {
if queryAddresses {
- rows, err := tx.Query("SELECT address FROM accountbase where length(data) = 1 and data = x'80'") // empty AccountData is 0x80
- if err != nil {
- return 0, nil, err
+ rows, qErr := tx.Query("SELECT address FROM accountbase where length(data) = 1 and data = x'80'") // empty AccountData is 0x80
+ if qErr != nil {
+ return 0, nil, qErr
}
defer rows.Close()
@@ -856,16 +856,16 @@ func removeEmptyAccountData(tx *sql.Tx, queryAddresses bool) (num int64, address
// reencodeAccounts reads all the accounts in the accountbase table, decodes and reencodes the account data.
// if the account data is found to have a different encoding, it would update the encoded account on disk.
// on return, it returns the number of modified accounts as well as an error ( if we had any )
-func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, err error) {
+func reencodeAccounts(ctx context.Context, e db.Executable) (modifiedAccounts uint, err error) {
modifiedAccounts = 0
scannedAccounts := 0
- updateStmt, err := tx.PrepareContext(ctx, "UPDATE accountbase SET data = ? WHERE address = ?")
+ updateStmt, err := e.PrepareContext(ctx, "UPDATE accountbase SET data = ? WHERE address = ?")
if err != nil {
return 0, err
}
- rows, err := tx.QueryContext(ctx, "SELECT address, data FROM accountbase")
+ rows, err := e.QueryContext(ctx, "SELECT address, data FROM accountbase")
if err != nil {
return
}
@@ -880,7 +880,7 @@ func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, e
if scannedAccounts%1000 == 0 {
// The return value from ResetTransactionWarnDeadline can be safely ignored here since it would only default to writing the warning
// message, which would let us know that it failed anyway.
- _, err = db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(time.Second))
+ _, err = db.ResetTransactionWarnDeadline(ctx, e, time.Now().Add(time.Second))
if err != nil {
return
}
@@ -913,13 +913,13 @@ func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, e
}
// we need to update the encoded data.
- result, err := updateStmt.ExecContext(ctx, reencodedAccountData, addrbuf)
- if err != nil {
- return 0, err
+ result, rowsErr := updateStmt.ExecContext(ctx, reencodedAccountData, addrbuf)
+ if rowsErr != nil {
+ return 0, rowsErr
}
- rowsUpdated, err := result.RowsAffected()
- if err != nil {
- return 0, err
+ rowsUpdated, rowsErr := result.RowsAffected()
+ if rowsErr != nil {
+ return 0, rowsErr
}
if rowsUpdated != 1 {
return 0, fmt.Errorf("failed to update account %v, number of rows updated was %d instead of 1", addr, rowsUpdated)
@@ -932,8 +932,8 @@ func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, e
return
}
-func convertOnlineRoundParamsTail(ctx context.Context, tx *sql.Tx) error {
+func convertOnlineRoundParamsTail(ctx context.Context, e db.Executable) error {
// create vote last index
- _, err := tx.ExecContext(ctx, createVoteLastValidIndex)
+ _, err := e.ExecContext(ctx, createVoteLastValidIndex)
return err
}
diff --git a/ledger/store/trackerdb/sqlitedriver/spVerificationAccessor.go b/ledger/store/trackerdb/sqlitedriver/spVerificationAccessor.go
index 3a024e0a9..0354a3c83 100644
--- a/ledger/store/trackerdb/sqlitedriver/spVerificationAccessor.go
+++ b/ledger/store/trackerdb/sqlitedriver/spVerificationAccessor.go
@@ -18,6 +18,7 @@ package sqlitedriver
import (
"context"
+ "database/sql"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger/ledgercore"
@@ -66,7 +67,9 @@ func (spa *stateProofVerificationReader) LookupSPContext(stateProofLastAttestedR
row := spa.q.QueryRow("SELECT verificationcontext FROM stateproofverification WHERE lastattestedround=?", stateProofLastAttestedRound)
var buf []byte
err := row.Scan(&buf)
- if err != nil {
+ if err == sql.ErrNoRows {
+ return trackerdb.ErrNotFound
+ } else if err != nil {
return err
}
err = protocol.Decode(buf, &verificationContext)
diff --git a/ledger/store/trackerdb/sqlitedriver/sql.go b/ledger/store/trackerdb/sqlitedriver/sql.go
index 1c7becb91..7d5461446 100644
--- a/ledger/store/trackerdb/sqlitedriver/sql.go
+++ b/ledger/store/trackerdb/sqlitedriver/sql.go
@@ -30,7 +30,6 @@ import (
// accountsDbQueries is used to cache a prepared SQL statement to look up
// the state of a single account.
type accountsDbQueries struct {
- listCreatablesStmt *sql.Stmt
lookupAccountStmt *sql.Stmt
lookupResourcesStmt *sql.Stmt
lookupAllResourcesStmt *sql.Stmt
@@ -53,7 +52,7 @@ type accountsSQLWriter struct {
}
type onlineAccountsSQLWriter struct {
- insertStmt, updateStmt *sql.Stmt
+ insertStmt *sql.Stmt
}
type sqlRowRef struct {
@@ -70,11 +69,6 @@ func AccountsInitDbQueries(q db.Queryable) (*accountsDbQueries, error) {
var err error
qs := &accountsDbQueries{}
- qs.listCreatablesStmt, err = q.Prepare("SELECT acctrounds.rnd, assetcreators.asset, assetcreators.creator FROM acctrounds LEFT JOIN assetcreators ON assetcreators.asset <= ? AND assetcreators.ctype = ? WHERE acctrounds.id='acctbase' ORDER BY assetcreators.asset desc LIMIT ?")
- if err != nil {
- return nil, err
- }
-
qs.lookupAccountStmt, err = q.Prepare("SELECT accountbase.rowid, acctrounds.rnd, accountbase.data FROM acctrounds LEFT JOIN accountbase ON address=? WHERE id='acctbase'")
if err != nil {
return nil, err
@@ -131,16 +125,11 @@ func OnlineAccountsInitDbQueries(r db.Queryable) (*onlineAccountsDbQueries, erro
}
// MakeOnlineAccountsSQLWriter constructs an OnlineAccountsWriter backed by sql queries.
-func MakeOnlineAccountsSQLWriter(tx *sql.Tx, hasAccounts bool) (w *onlineAccountsSQLWriter, err error) {
+func MakeOnlineAccountsSQLWriter(e db.Executable, hasAccounts bool) (w *onlineAccountsSQLWriter, err error) {
w = new(onlineAccountsSQLWriter)
if hasAccounts {
- w.insertStmt, err = tx.Prepare("INSERT INTO onlineaccounts (address, normalizedonlinebalance, data, updround, votelastvalid) VALUES (?, ?, ?, ?, ?)")
- if err != nil {
- return
- }
-
- w.updateStmt, err = tx.Prepare("UPDATE onlineaccounts SET normalizedonlinebalance = ?, data = ?, updround = ?, votelastvalid =? WHERE rowid = ?")
+ w.insertStmt, err = e.Prepare("INSERT INTO onlineaccounts (address, normalizedonlinebalance, data, updround, votelastvalid) VALUES (?, ?, ?, ?, ?)")
if err != nil {
return
}
@@ -150,62 +139,62 @@ func MakeOnlineAccountsSQLWriter(tx *sql.Tx, hasAccounts bool) (w *onlineAccount
}
// MakeAccountsSQLWriter constructs an AccountsWriter backed by sql queries.
-func MakeAccountsSQLWriter(tx *sql.Tx, hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (w *accountsSQLWriter, err error) {
+func MakeAccountsSQLWriter(e db.Executable, hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (w *accountsSQLWriter, err error) {
w = new(accountsSQLWriter)
if hasAccounts {
- w.deleteByRowIDStmt, err = tx.Prepare("DELETE FROM accountbase WHERE rowid=?")
+ w.deleteByRowIDStmt, err = e.Prepare("DELETE FROM accountbase WHERE rowid=?")
if err != nil {
return
}
- w.insertStmt, err = tx.Prepare("INSERT INTO accountbase (address, normalizedonlinebalance, data) VALUES (?, ?, ?)")
+ w.insertStmt, err = e.Prepare("INSERT INTO accountbase (address, normalizedonlinebalance, data) VALUES (?, ?, ?)")
if err != nil {
return
}
- w.updateStmt, err = tx.Prepare("UPDATE accountbase SET normalizedonlinebalance = ?, data = ? WHERE rowid = ?")
+ w.updateStmt, err = e.Prepare("UPDATE accountbase SET normalizedonlinebalance = ?, data = ? WHERE rowid = ?")
if err != nil {
return
}
}
if hasResources {
- w.deleteResourceStmt, err = tx.Prepare("DELETE FROM resources WHERE addrid = ? AND aidx = ?")
+ w.deleteResourceStmt, err = e.Prepare("DELETE FROM resources WHERE addrid = ? AND aidx = ?")
if err != nil {
return
}
- w.insertResourceStmt, err = tx.Prepare("INSERT INTO resources(addrid, aidx, data) VALUES(?, ?, ?)")
+ w.insertResourceStmt, err = e.Prepare("INSERT INTO resources(addrid, aidx, data) VALUES(?, ?, ?)")
if err != nil {
return
}
- w.updateResourceStmt, err = tx.Prepare("UPDATE resources SET data = ? WHERE addrid = ? AND aidx = ?")
+ w.updateResourceStmt, err = e.Prepare("UPDATE resources SET data = ? WHERE addrid = ? AND aidx = ?")
if err != nil {
return
}
}
if hasKvPairs {
- w.upsertKvPairStmt, err = tx.Prepare("INSERT INTO kvstore (key, value) VALUES (?, ?) ON CONFLICT(key) DO UPDATE SET value=excluded.value")
+ w.upsertKvPairStmt, err = e.Prepare("INSERT INTO kvstore (key, value) VALUES (?, ?) ON CONFLICT(key) DO UPDATE SET value=excluded.value")
if err != nil {
return
}
- w.deleteKvPairStmt, err = tx.Prepare("DELETE FROM kvstore WHERE key=?")
+ w.deleteKvPairStmt, err = e.Prepare("DELETE FROM kvstore WHERE key=?")
if err != nil {
return
}
}
if hasCreatables {
- w.insertCreatableIdxStmt, err = tx.Prepare("INSERT INTO assetcreators (asset, creator, ctype) VALUES (?, ?, ?)")
+ w.insertCreatableIdxStmt, err = e.Prepare("INSERT INTO assetcreators (asset, creator, ctype) VALUES (?, ?, ?)")
if err != nil {
return
}
- w.deleteCreatableIdxStmt, err = tx.Prepare("DELETE FROM assetcreators WHERE asset=? AND ctype=?")
+ w.deleteCreatableIdxStmt, err = e.Prepare("DELETE FROM assetcreators WHERE asset=? AND ctype=?")
if err != nil {
return
}
@@ -213,39 +202,6 @@ func MakeAccountsSQLWriter(tx *sql.Tx, hasAccounts, hasResources, hasKvPairs, ha
return
}
-// ListCreatables returns an array of CreatableLocator which have CreatableIndex smaller or equal to maxIdx and are of the provided CreatableType.
-func (qs *accountsDbQueries) ListCreatables(maxIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) (results []basics.CreatableLocator, dbRound basics.Round, err error) {
- err = db.Retry(func() error {
- // Query for assets in range
- rows, err := qs.listCreatablesStmt.Query(maxIdx, ctype, maxResults)
- if err != nil {
- return err
- }
- defer rows.Close()
-
- // For each row, copy into a new CreatableLocator and append to results
- var buf []byte
- var cl basics.CreatableLocator
- var creatableIndex sql.NullInt64
- for rows.Next() {
- err = rows.Scan(&dbRound, &creatableIndex, &buf)
- if err != nil {
- return err
- }
- if !creatableIndex.Valid {
- // we received an entry without any index. This would happen only on the first entry when there are no creatables of the requested type.
- break
- }
- cl.Index = basics.CreatableIndex(creatableIndex.Int64)
- copy(cl.Creator[:], buf)
- cl.Type = ctype
- results = append(results, cl)
- }
- return nil
- })
- return
-}
-
// sql.go has the following contradictory comments:
// Reference types such as []byte are only valid until the next call to Scan
@@ -476,6 +432,9 @@ func (qs *accountsDbQueries) LookupAccount(addr basics.Address) (data trackerdb.
data.Ref = sqlRowRef{rowid.Int64}
err = protocol.Decode(buf, &data.AccountData)
return err
+ } else if len(buf) == 0 && rowid.Valid {
+ // we are sure empty valid accounts do not exist in the database.
+ return fmt.Errorf("account %v exists but has no data in the database", addr)
}
// we don't have that account, just return the database round.
return nil
@@ -522,13 +481,15 @@ func (qs *onlineAccountsDbQueries) LookupOnline(addr basics.Address, rnd basics.
return
}
-func (qs *onlineAccountsDbQueries) LookupOnlineTotalsHistory(round basics.Round) (basics.MicroAlgos, error) {
+func (qs *onlineAccountsDbQueries) LookupOnlineRoundParams(round basics.Round) (ledgercore.OnlineRoundParamsData, error) {
data := ledgercore.OnlineRoundParamsData{}
err := db.Retry(func() error {
row := qs.lookupOnlineTotalsStmt.QueryRow(round)
var buf []byte
err := row.Scan(&buf)
- if err != nil {
+ if err == sql.ErrNoRows {
+ return trackerdb.ErrNotFound
+ } else if err != nil {
return err
}
err = protocol.Decode(buf, &data)
@@ -537,7 +498,10 @@ func (qs *onlineAccountsDbQueries) LookupOnlineTotalsHistory(round basics.Round)
}
return nil
})
- return basics.MicroAlgos{Raw: data.OnlineSupply}, err
+ if err != nil {
+ return ledgercore.OnlineRoundParamsData{}, err
+ }
+ return data, nil
}
func (qs *onlineAccountsDbQueries) LookupOnlineHistory(addr basics.Address) (result []trackerdb.PersistedOnlineAccountData, rnd basics.Round, err error) {
@@ -552,7 +516,7 @@ func (qs *onlineAccountsDbQueries) LookupOnlineHistory(addr basics.Address) (res
var buf []byte
data := trackerdb.PersistedOnlineAccountData{}
var rowid int64
- err := rows.Scan(&rowid, &data.UpdRound, &rnd, &buf)
+ err = rows.Scan(&rowid, &data.UpdRound, &rnd, &buf)
if err != nil {
return err
}
@@ -564,14 +528,13 @@ func (qs *onlineAccountsDbQueries) LookupOnlineHistory(addr basics.Address) (res
data.Addr = addr
result = append(result, data)
}
- return err
+ return nil
})
return
}
func (qs *accountsDbQueries) Close() {
preparedQueries := []**sql.Stmt{
- &qs.listCreatablesStmt,
&qs.lookupAccountStmt,
&qs.lookupResourcesStmt,
&qs.lookupAllResourcesStmt,
diff --git a/ledger/store/trackerdb/sqlitedriver/sql_test.go b/ledger/store/trackerdb/sqlitedriver/sql_test.go
index 3fe71be4f..7e4dff97d 100644
--- a/ledger/store/trackerdb/sqlitedriver/sql_test.go
+++ b/ledger/store/trackerdb/sqlitedriver/sql_test.go
@@ -17,8 +17,13 @@
package sqlitedriver
import (
+ "context"
+ "database/sql"
"testing"
+ "github.com/algorand/go-algorand/data/basics"
+ storetesting "github.com/algorand/go-algorand/ledger/store/testing"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
)
@@ -53,3 +58,26 @@ func TestKeyPrefixIntervalPreprocessing(t *testing.T) {
require.Equal(t, tc.outputPrefixIncr, actualOutputPrefixIncr)
}
}
+
+// TestAccountsDbQueriesCreateClose tests that we can create the accountsDbQueries and close it.
+// It also verifies that double-closing it doesn't create an issue.
+func TestAccountsDbQueriesCreateClose(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ dbs, _ := storetesting.DbOpenTest(t, true)
+ storetesting.SetDbLogging(t, dbs)
+ defer dbs.Close()
+
+ err := dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ AccountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion)
+ return nil
+ })
+ require.NoError(t, err)
+ qs, err := AccountsInitDbQueries(dbs.Rdb.Handle)
+ require.NoError(t, err)
+ require.NotNil(t, qs.lookupAccountStmt)
+ qs.Close()
+ require.Nil(t, qs.lookupAccountStmt)
+ qs.Close()
+ require.Nil(t, qs.lookupAccountStmt)
+}
diff --git a/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go
new file mode 100644
index 000000000..34f4d363c
--- /dev/null
+++ b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go
@@ -0,0 +1,343 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package sqlitedriver
+
+import (
+ "context"
+ "database/sql"
+ "testing"
+ "time"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/store/trackerdb"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+type trackerSQLStore struct {
+ pair db.Pair
+ trackerdb.Reader
+ trackerdb.Writer
+ trackerdb.Catchpoint
+}
+
+// Open opens the sqlite database store
+func Open(dbFilename string, dbMem bool, log logging.Logger) (store trackerdb.Store, err error) {
+ pair, err := db.OpenPair(dbFilename, dbMem)
+ if err != nil {
+ return
+ }
+ pair.Rdb.SetLogger(log)
+ pair.Wdb.SetLogger(log)
+ return MakeStore(pair), nil
+}
+
+// MakeStore creates a tracker SQL db from a sql db handle.
+func MakeStore(pair db.Pair) trackerdb.Store {
+ return &trackerSQLStore{pair, &sqlReader{pair.Rdb.Handle}, &sqlWriter{pair.Wdb.Handle}, &sqlCatchpoint{pair.Wdb.Handle}}
+}
+
+func (s *trackerSQLStore) SetSynchronousMode(ctx context.Context, mode db.SynchronousMode, fullfsync bool) (err error) {
+ return s.pair.Wdb.SetSynchronousMode(ctx, mode, fullfsync)
+}
+
+func (s *trackerSQLStore) IsSharedCacheConnection() bool {
+ return s.pair.Wdb.IsSharedCacheConnection()
+}
+
+func (s *trackerSQLStore) Batch(fn trackerdb.BatchFn) (err error) {
+ return s.BatchContext(context.Background(), fn)
+}
+
+func (s *trackerSQLStore) BatchContext(ctx context.Context, fn trackerdb.BatchFn) (err error) {
+ return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error {
+ return fn(ctx, &sqlBatchScope{tx, false, &sqlWriter{tx}})
+ })
+}
+
+func (s *trackerSQLStore) BeginBatch(ctx context.Context) (trackerdb.Batch, error) {
+ handle, err := s.pair.Wdb.Handle.BeginTx(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &sqlBatchScope{handle, false, &sqlWriter{handle}}, nil
+}
+
+func (s *trackerSQLStore) Snapshot(fn trackerdb.SnapshotFn) (err error) {
+ return s.SnapshotContext(context.Background(), fn)
+}
+
+func (s *trackerSQLStore) SnapshotContext(ctx context.Context, fn trackerdb.SnapshotFn) (err error) {
+ return s.pair.Rdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error {
+ return fn(ctx, &sqlSnapshotScope{tx, &sqlReader{tx}})
+ })
+}
+
+func (s *trackerSQLStore) BeginSnapshot(ctx context.Context) (trackerdb.Snapshot, error) {
+ handle, err := s.pair.Rdb.Handle.BeginTx(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &sqlSnapshotScope{handle, &sqlReader{handle}}, nil
+}
+
+func (s *trackerSQLStore) Transaction(fn trackerdb.TransactionFn) (err error) {
+ return s.TransactionContext(context.Background(), fn)
+}
+
+func (s *trackerSQLStore) TransactionContext(ctx context.Context, fn trackerdb.TransactionFn) (err error) {
+ return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error {
+ return fn(ctx, &sqlTransactionScope{tx, false, &sqlReader{tx}, &sqlWriter{tx}, &sqlCatchpoint{tx}})
+ })
+}
+
+func (s *trackerSQLStore) BeginTransaction(ctx context.Context) (trackerdb.Transaction, error) {
+ handle, err := s.pair.Wdb.Handle.BeginTx(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &sqlTransactionScope{handle, false, &sqlReader{handle}, &sqlWriter{handle}, &sqlCatchpoint{handle}}, nil
+}
+
+func (s trackerSQLStore) RunMigrations(ctx context.Context, params trackerdb.Params, log logging.Logger, targetVersion int32) (mgr trackerdb.InitParams, err error) {
+ err = s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error {
+ mgr, err = RunMigrations(ctx, tx, params, log, targetVersion)
+ return err
+ })
+ return
+}
+
+// TODO: rename: this is a sqlite specific name, this could also be used to trigger compact on KV stores.
+// it seems to only be used during a v2 migration
+func (s *trackerSQLStore) Vacuum(ctx context.Context) (stats db.VacuumStats, err error) {
+ _, err = s.pair.Wdb.Vacuum(ctx)
+ return
+}
+
+func (s *trackerSQLStore) ResetToV6Test(ctx context.Context) error {
+ var resetExprs = []string{
+ `DROP TABLE IF EXISTS onlineaccounts`,
+ `DROP TABLE IF EXISTS txtail`,
+ `DROP TABLE IF EXISTS onlineroundparamstail`,
+ `DROP TABLE IF EXISTS catchpointfirststageinfo`,
+ }
+
+ return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error {
+ for _, stmt := range resetExprs {
+ _, err := tx.ExecContext(ctx, stmt)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
+func (s *trackerSQLStore) Close() {
+ s.pair.Close()
+}
+
+type sqlReader struct {
+ q db.Queryable
+}
+
+// MakeAccountsOptimizedReader implements trackerdb.Reader
+func (r *sqlReader) MakeAccountsOptimizedReader() (trackerdb.AccountsReader, error) {
+ return AccountsInitDbQueries(r.q)
+}
+
+// MakeAccountsReader implements trackerdb.Reader
+func (r *sqlReader) MakeAccountsReader() (trackerdb.AccountsReaderExt, error) {
+ // TODO: create and use a make accounts reader that takes just a queryable
+ return NewAccountsSQLReader(r.q), nil
+}
+
+// MakeOnlineAccountsOptimizedReader implements trackerdb.Reader
+func (r *sqlReader) MakeOnlineAccountsOptimizedReader() (trackerdb.OnlineAccountsReader, error) {
+ return OnlineAccountsInitDbQueries(r.q)
+}
+
+// MakeSpVerificationCtxReader implements trackerdb.Reader
+func (r *sqlReader) MakeSpVerificationCtxReader() trackerdb.SpVerificationCtxReader {
+ return makeStateProofVerificationReader(r.q)
+}
+
+// MakeCatchpointPendingHashesIterator implements trackerdb.Reader
+func (r *sqlReader) MakeCatchpointPendingHashesIterator(hashCount int) trackerdb.CatchpointPendingHashesIter {
+ return MakeCatchpointPendingHashesIterator(hashCount, r.q)
+}
+
+// MakeCatchpointReader implements trackerdb.Reader
+func (r *sqlReader) MakeCatchpointReader() (trackerdb.CatchpointReader, error) {
+ return makeCatchpointReader(r.q), nil
+}
+
+// MakeEncodedAccoutsBatchIter implements trackerdb.Reader
+func (r *sqlReader) MakeEncodedAccoutsBatchIter() trackerdb.EncodedAccountsBatchIter {
+ return MakeEncodedAccoutsBatchIter(r.q)
+}
+
+// MakeKVsIter implements trackerdb.Reader
+func (r *sqlReader) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) {
+ return MakeKVsIter(ctx, r.q)
+}
+
+type sqlWriter struct {
+ e db.Executable
+}
+
+// MakeAccountsOptimizedWriter implements trackerdb.Writer
+func (w *sqlWriter) MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (trackerdb.AccountsWriter, error) {
+ return MakeAccountsSQLWriter(w.e, hasAccounts, hasResources, hasKvPairs, hasCreatables)
+}
+
+// MakeAccountsWriter implements trackerdb.Writer
+func (w *sqlWriter) MakeAccountsWriter() (trackerdb.AccountsWriterExt, error) {
+ return NewAccountsSQLReaderWriter(w.e), nil
+}
+
+// MakeOnlineAccountsOptimizedWriter implements trackerdb.Writer
+func (w *sqlWriter) MakeOnlineAccountsOptimizedWriter(hasAccounts bool) (trackerdb.OnlineAccountsWriter, error) {
+ return MakeOnlineAccountsSQLWriter(w.e, hasAccounts)
+}
+
+// MakeSpVerificationCtxWriter implements trackerdb.Writer
+func (w *sqlWriter) MakeSpVerificationCtxWriter() trackerdb.SpVerificationCtxWriter {
+ return makeStateProofVerificationWriter(w.e)
+}
+
+// Testing implements trackerdb.Writer
+func (w *sqlWriter) Testing() trackerdb.WriterTestExt {
+ return w
+}
+
+// AccountsInitLightTest implements trackerdb.WriterTestExt
+func (w *sqlWriter) AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) {
+ return AccountsInitLightTest(tb, w.e, initAccounts, proto)
+}
+
+// AccountsInitTest implements trackerdb.WriterTestExt
+func (w *sqlWriter) AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) {
+ return AccountsInitTest(tb, w.e, initAccounts, proto)
+}
+
+// AccountsUpdateSchemaTest implements trackerdb.WriterTestExt
+func (w *sqlWriter) AccountsUpdateSchemaTest(ctx context.Context) (err error) {
+ return AccountsUpdateSchemaTest(ctx, w.e)
+}
+
+// ModifyAcctBaseTest implements trackerdb.WriterTestExt
+func (w *sqlWriter) ModifyAcctBaseTest() error {
+ return modifyAcctBaseTest(w.e)
+}
+
+type sqlCatchpoint struct {
+ e db.Executable
+}
+
+// MakeCatchpointReaderWriter implements trackerdb.Catchpoint
+func (c *sqlCatchpoint) MakeCatchpointReaderWriter() (trackerdb.CatchpointReaderWriter, error) {
+ return NewCatchpointSQLReaderWriter(c.e), nil
+}
+
+// MakeCatchpointWriter implements trackerdb.Catchpoint
+func (c *sqlCatchpoint) MakeCatchpointWriter() (trackerdb.CatchpointWriter, error) {
+ return NewCatchpointSQLReaderWriter(c.e), nil
+}
+
+// MakeMerkleCommitter implements trackerdb.Catchpoint
+func (c *sqlCatchpoint) MakeMerkleCommitter(staging bool) (trackerdb.MerkleCommitter, error) {
+ return MakeMerkleCommitter(c.e, staging)
+}
+
+// MakeOrderedAccountsIter implements trackerdb.Catchpoint
+func (c *sqlCatchpoint) MakeOrderedAccountsIter(accountCount int) trackerdb.OrderedAccountsIter {
+ return MakeOrderedAccountsIter(c.e, accountCount)
+}
+
+type sqlBatchScope struct {
+ tx *sql.Tx
+ committed bool
+ trackerdb.Writer
+}
+
+func (bs *sqlBatchScope) ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) {
+ return db.ResetTransactionWarnDeadline(ctx, bs.tx, deadline)
+}
+
+func (bs *sqlBatchScope) Close() error {
+ if !bs.committed {
+ return bs.tx.Rollback()
+ }
+ return nil
+}
+
+func (bs *sqlBatchScope) Commit() error {
+ err := bs.tx.Commit()
+ if err != nil {
+ return err
+ }
+ bs.committed = true
+ return nil
+}
+
+type sqlSnapshotScope struct {
+ tx *sql.Tx
+ trackerdb.Reader
+}
+
+func (ss *sqlSnapshotScope) ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) {
+ return db.ResetTransactionWarnDeadline(ctx, ss.tx, deadline)
+}
+
+func (ss *sqlSnapshotScope) Close() error {
+ return ss.tx.Rollback()
+}
+
+type sqlTransactionScope struct {
+ tx *sql.Tx
+ committed bool
+ trackerdb.Reader
+ trackerdb.Writer
+ trackerdb.Catchpoint
+}
+
+func (txs *sqlTransactionScope) RunMigrations(ctx context.Context, params trackerdb.Params, log logging.Logger, targetVersion int32) (mgr trackerdb.InitParams, err error) {
+ return RunMigrations(ctx, txs.tx, params, log, targetVersion)
+}
+
+func (txs *sqlTransactionScope) ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) {
+ return db.ResetTransactionWarnDeadline(ctx, txs.tx, deadline)
+}
+
+func (txs *sqlTransactionScope) Close() error {
+ if !txs.committed {
+ return txs.tx.Rollback()
+ }
+ return nil
+}
+
+func (txs *sqlTransactionScope) Commit() error {
+ err := txs.tx.Commit()
+ if err != nil {
+ return err
+ }
+ txs.committed = true
+ return nil
+}
diff --git a/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go b/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go
deleted file mode 100644
index 74327be2b..000000000
--- a/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright (C) 2019-2023 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package sqlitedriver
-
-import (
- "context"
- "database/sql"
- "os"
- "testing"
- "time"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/ledger/store/trackerdb"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util/db"
-)
-
-type trackerSQLStore struct {
- // expose the internals for now so we can slowly change the code depending on them
- pair db.Pair
-}
-
-type sqlBatchScope struct {
- tx *sql.Tx
-}
-
-type sqlSnapshotScope struct {
- tx *sql.Tx
-}
-
-type sqlTransactionScope struct {
- tx *sql.Tx
-}
-
-// OpenTrackerSQLStore opens the sqlite database store
-func OpenTrackerSQLStore(dbFilename string, dbMem bool) (store *trackerSQLStore, err error) {
- db, err := db.OpenPair(dbFilename, dbMem)
- if err != nil {
- return
- }
-
- return &trackerSQLStore{db}, nil
-}
-
-// CreateTrackerSQLStore crates a tracker SQL db from sql db handle.
-func CreateTrackerSQLStore(pair db.Pair) *trackerSQLStore {
- return &trackerSQLStore{pair}
-}
-
-// SetLogger sets the Logger, mainly for unit test quietness
-func (s *trackerSQLStore) SetLogger(log logging.Logger) {
- s.pair.Rdb.SetLogger(log)
- s.pair.Wdb.SetLogger(log)
-}
-
-func (s *trackerSQLStore) SetSynchronousMode(ctx context.Context, mode db.SynchronousMode, fullfsync bool) (err error) {
- return s.pair.Wdb.SetSynchronousMode(ctx, mode, fullfsync)
-}
-
-func (s *trackerSQLStore) IsSharedCacheConnection() bool {
- return s.pair.Wdb.IsSharedCacheConnection()
-}
-
-func (s *trackerSQLStore) Batch(fn trackerdb.BatchFn) (err error) {
- return s.BatchContext(context.Background(), fn)
-}
-
-func (s *trackerSQLStore) BatchContext(ctx context.Context, fn trackerdb.BatchFn) (err error) {
- return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error {
- return fn(ctx, sqlBatchScope{tx})
- })
-}
-
-func (s *trackerSQLStore) Snapshot(fn trackerdb.SnapshotFn) (err error) {
- return s.SnapshotContext(context.Background(), fn)
-}
-
-func (s *trackerSQLStore) SnapshotContext(ctx context.Context, fn trackerdb.SnapshotFn) (err error) {
- return s.pair.Rdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error {
- return fn(ctx, sqlSnapshotScope{tx})
- })
-}
-
-func (s *trackerSQLStore) Transaction(fn trackerdb.TransactionFn) (err error) {
- return s.TransactionContext(context.Background(), fn)
-}
-
-func (s *trackerSQLStore) TransactionContext(ctx context.Context, fn trackerdb.TransactionFn) (err error) {
- return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error {
- return fn(ctx, sqlTransactionScope{tx})
- })
-}
-
-func (s *trackerSQLStore) MakeAccountsOptimizedReader() (trackerdb.AccountsReader, error) {
- return AccountsInitDbQueries(s.pair.Rdb.Handle)
-}
-
-func (s *trackerSQLStore) MakeOnlineAccountsOptimizedReader() (trackerdb.OnlineAccountsReader, error) {
- return OnlineAccountsInitDbQueries(s.pair.Rdb.Handle)
-}
-
-func (s *trackerSQLStore) MakeCatchpointReaderWriter() (trackerdb.CatchpointReaderWriter, error) {
- w := NewCatchpointSQLReaderWriter(s.pair.Wdb.Handle)
- return w, nil
-}
-
-// TODO: rename: this is a sqlite specific name, this could also be used to trigger compact on KV stores.
-// it seems to only be used during a v2 migration
-func (s *trackerSQLStore) Vacuum(ctx context.Context) (stats db.VacuumStats, err error) {
- _, err = s.pair.Wdb.Vacuum(ctx)
- return
-}
-
-func (s *trackerSQLStore) CleanupTest(dbName string, inMemory bool) {
- s.pair.Close()
- if !inMemory {
- os.Remove(dbName)
- }
-}
-
-func (s *trackerSQLStore) ResetToV6Test(ctx context.Context) error {
- var resetExprs = []string{
- `DROP TABLE IF EXISTS onlineaccounts`,
- `DROP TABLE IF EXISTS txtail`,
- `DROP TABLE IF EXISTS onlineroundparamstail`,
- `DROP TABLE IF EXISTS catchpointfirststageinfo`,
- }
-
- return s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error {
- for _, stmt := range resetExprs {
- _, err := tx.ExecContext(ctx, stmt)
- if err != nil {
- return err
- }
- }
- return nil
- })
-}
-
-func (s *trackerSQLStore) Close() {
- s.pair.Close()
-}
-
-// Testing returns this scope, exposed as an interface with test functions
-func (txs sqlTransactionScope) Testing() trackerdb.TestTransactionScope {
- return txs
-}
-
-func (txs sqlTransactionScope) MakeCatchpointReaderWriter() (trackerdb.CatchpointReaderWriter, error) {
- return NewCatchpointSQLReaderWriter(txs.tx), nil
-}
-
-func (txs sqlTransactionScope) MakeAccountsReaderWriter() (trackerdb.AccountsReaderWriter, error) {
- return NewAccountsSQLReaderWriter(txs.tx), nil
-}
-
-// implements Testing interface
-func (txs sqlTransactionScope) MakeAccountsOptimizedReader() (trackerdb.AccountsReader, error) {
- return AccountsInitDbQueries(txs.tx)
-}
-
-func (txs sqlTransactionScope) MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (trackerdb.AccountsWriter, error) {
- return MakeAccountsSQLWriter(txs.tx, hasAccounts, hasResources, hasKvPairs, hasCreatables)
-}
-
-func (txs sqlTransactionScope) MakeOnlineAccountsOptimizedWriter(hasAccounts bool) (w trackerdb.OnlineAccountsWriter, err error) {
- return MakeOnlineAccountsSQLWriter(txs.tx, hasAccounts)
-}
-
-// implements Testing interface
-func (txs sqlTransactionScope) MakeOnlineAccountsOptimizedReader() (r trackerdb.OnlineAccountsReader, err error) {
- return OnlineAccountsInitDbQueries(txs.tx)
-}
-
-func (txs sqlTransactionScope) MakeMerkleCommitter(staging bool) (trackerdb.MerkleCommitter, error) {
- return MakeMerkleCommitter(txs.tx, staging)
-}
-
-func (txs sqlTransactionScope) MakeOrderedAccountsIter(accountCount int) trackerdb.OrderedAccountsIter {
- return MakeOrderedAccountsIter(txs.tx, accountCount)
-}
-
-func (txs sqlTransactionScope) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) {
- return MakeKVsIter(ctx, txs.tx)
-}
-
-func (txs sqlTransactionScope) MakeEncodedAccoutsBatchIter() trackerdb.EncodedAccountsBatchIter {
- return MakeEncodedAccoutsBatchIter(txs.tx)
-}
-
-func (txs sqlTransactionScope) MakeSpVerificationCtxReaderWriter() trackerdb.SpVerificationCtxReaderWriter {
- return makeStateProofVerificationReaderWriter(txs.tx, txs.tx)
-}
-
-func (txs sqlTransactionScope) RunMigrations(ctx context.Context, params trackerdb.Params, log logging.Logger, targetVersion int32) (mgr trackerdb.InitParams, err error) {
- return RunMigrations(ctx, txs.tx, params, log, targetVersion)
-}
-
-func (txs sqlTransactionScope) ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) {
- return db.ResetTransactionWarnDeadline(ctx, txs.tx, deadline)
-}
-
-// implements Testing interface
-func (txs sqlTransactionScope) AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) {
- return AccountsInitTest(tb, txs.tx, initAccounts, proto)
-}
-
-// implements Testing interface
-func (txs sqlTransactionScope) AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) {
- return AccountsInitLightTest(tb, txs.tx, initAccounts, proto)
-}
-
-// Testing returns this scope, exposed as an interface with test functions
-func (bs sqlBatchScope) Testing() trackerdb.TestBatchScope {
- return bs
-}
-
-func (bs sqlBatchScope) MakeCatchpointWriter() (trackerdb.CatchpointWriter, error) {
- return NewCatchpointSQLReaderWriter(bs.tx), nil
-}
-
-func (bs sqlBatchScope) MakeAccountsWriter() (trackerdb.AccountsWriterExt, error) {
- return NewAccountsSQLReaderWriter(bs.tx), nil
-}
-
-func (bs sqlBatchScope) MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (trackerdb.AccountsWriter, error) {
- return MakeAccountsSQLWriter(bs.tx, hasAccounts, hasResources, hasKvPairs, hasCreatables)
-}
-
-// implements Testing interface
-func (bs sqlBatchScope) RunMigrations(ctx context.Context, params trackerdb.Params, log logging.Logger, targetVersion int32) (mgr trackerdb.InitParams, err error) {
- return RunMigrations(ctx, bs.tx, params, log, targetVersion)
-}
-
-func (bs sqlBatchScope) ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error) {
- return db.ResetTransactionWarnDeadline(ctx, bs.tx, deadline)
-}
-
-// implements Testing interface
-func (bs sqlBatchScope) AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) {
- return AccountsInitTest(tb, bs.tx, initAccounts, proto)
-}
-
-// implements Testing interface
-func (bs sqlBatchScope) ModifyAcctBaseTest() error {
- return modifyAcctBaseTest(bs.tx)
-}
-
-// implements Testing interface
-func (bs sqlBatchScope) AccountsUpdateSchemaTest(ctx context.Context) (err error) {
- return AccountsUpdateSchemaTest(ctx, bs.tx)
-}
-
-func (bs sqlBatchScope) MakeSpVerificationCtxWriter() trackerdb.SpVerificationCtxWriter {
- return makeStateProofVerificationWriter(bs.tx)
-}
-
-func (ss sqlSnapshotScope) MakeAccountsReader() (trackerdb.AccountsReaderExt, error) {
- return NewAccountsSQLReaderWriter(ss.tx), nil
-}
-
-func (ss sqlSnapshotScope) MakeCatchpointReader() (trackerdb.CatchpointReader, error) {
- return NewCatchpointSQLReaderWriter(ss.tx), nil
-}
-
-func (ss sqlSnapshotScope) MakeCatchpointPendingHashesIterator(hashCount int) trackerdb.CatchpointPendingHashesIter {
- return MakeCatchpointPendingHashesIterator(hashCount, ss.tx)
-}
-
-func (ss sqlSnapshotScope) MakeSpVerificationCtxReader() trackerdb.SpVerificationCtxReader {
- return makeStateProofVerificationReader(ss.tx)
-}
diff --git a/ledger/store/trackerdb/sqlitedriver/testing.go b/ledger/store/trackerdb/sqlitedriver/testing.go
index 8d0a61afd..4fed5e472 100644
--- a/ledger/store/trackerdb/sqlitedriver/testing.go
+++ b/ledger/store/trackerdb/sqlitedriver/testing.go
@@ -18,7 +18,6 @@ package sqlitedriver
import (
"context"
- "database/sql"
"fmt"
"strings"
"testing"
@@ -33,59 +32,55 @@ import (
"github.com/stretchr/testify/require"
)
-// DbOpenTrackerTest opens a sqlite db file for testing purposes.
-func DbOpenTrackerTest(t testing.TB, inMemory bool) (trackerdb.TrackerStore, string) {
- fn := fmt.Sprintf("%s.%d", strings.ReplaceAll(t.Name(), "/", "."), crypto.RandUint64())
+// OpenForTesting opens a sqlite db file for testing purposes.
+// It sets the logging to the test logger and uses a tmp directory associated with the test for the db.
+// The test tmp directory is automatically cleaned up by the Go test framework.
+func OpenForTesting(t testing.TB, inMemory bool) (trackerdb.Store, string) {
+ fn := fmt.Sprintf("%s/%s.%d", t.TempDir(), strings.ReplaceAll(t.Name(), "/", "."), crypto.RandUint64())
- dbs, err := db.OpenPair(fn, inMemory)
+ store, err := Open(fn, inMemory, logging.TestingLog(t))
require.NoErrorf(t, err, "Filename : %s\nInMemory: %v", fn, inMemory)
- return &trackerSQLStore{dbs}, fn
-}
-
-// SetDbTrackerTestLogging sets a testing logger on a database.
-func SetDbTrackerTestLogging(t testing.TB, dbs trackerdb.TrackerStore) {
- dblogger := logging.TestingLog(t)
- dbs.SetLogger(dblogger)
+ return store, fn
}
// AccountsInitLightTest initializes an empty database for testing without the extra methods being called.
// implements Testing interface, test function only
-func AccountsInitLightTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) {
- newDB, err := accountsInit(tx, initAccounts, proto)
+func AccountsInitLightTest(tb testing.TB, e db.Executable, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error) {
+ newDB, err := accountsInit(e, initAccounts, proto)
require.NoError(tb, err)
return newDB, err
}
// modifyAcctBaseTest tweaks the database to move backwards.
// implements Testing interface, test function only
-func modifyAcctBaseTest(tx *sql.Tx) error {
- _, err := tx.Exec("update acctrounds set rnd = 1 WHERE id='acctbase' ")
+func modifyAcctBaseTest(e db.Executable) error {
+ _, err := e.Exec("update acctrounds set rnd = 1 WHERE id='acctbase' ")
return err
}
// AccountsInitTest initializes an empty database for testing.
// implements Testing interface, test function only
-func AccountsInitTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) {
- newDB, err := accountsInit(tx, initAccounts, config.Consensus[proto])
+func AccountsInitTest(tb testing.TB, e db.Executable, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool) {
+ newDB, err := accountsInit(e, initAccounts, config.Consensus[proto])
require.NoError(tb, err)
- err = accountsAddNormalizedBalance(tx, config.Consensus[proto])
+ err = accountsAddNormalizedBalance(e, config.Consensus[proto])
require.NoError(tb, err)
- err = accountsCreateResourceTable(context.Background(), tx)
+ err = accountsCreateResourceTable(context.Background(), e)
require.NoError(tb, err)
- err = performResourceTableMigration(context.Background(), tx, nil)
+ err = performResourceTableMigration(context.Background(), e, nil)
require.NoError(tb, err)
- err = accountsCreateOnlineAccountsTable(context.Background(), tx)
+ err = accountsCreateOnlineAccountsTable(context.Background(), e)
require.NoError(tb, err)
- err = accountsCreateTxTailTable(context.Background(), tx)
+ err = accountsCreateTxTailTable(context.Background(), e)
require.NoError(tb, err)
- err = performOnlineAccountsTableMigration(context.Background(), tx, nil, nil)
+ err = performOnlineAccountsTableMigration(context.Background(), e, nil, nil)
require.NoError(tb, err)
// since this is a test that starts from genesis, there is no tail that needs to be migrated.
@@ -94,42 +89,39 @@ func AccountsInitTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address
err = performTxTailTableMigration(context.Background(), nil, db.Accessor{})
require.NoError(tb, err)
- err = accountsCreateOnlineRoundParamsTable(context.Background(), tx)
+ err = accountsCreateOnlineRoundParamsTable(context.Background(), e)
require.NoError(tb, err)
- err = performOnlineRoundParamsTailMigration(context.Background(), tx, db.Accessor{}, true, proto)
+ err = performOnlineRoundParamsTailMigration(context.Background(), e, db.Accessor{}, true, proto)
require.NoError(tb, err)
- err = accountsCreateBoxTable(context.Background(), tx)
+ err = accountsCreateBoxTable(context.Background(), e)
require.NoError(tb, err)
- err = performKVStoreNullBlobConversion(context.Background(), tx)
+ err = performKVStoreNullBlobConversion(context.Background(), e)
require.NoError(tb, err)
return newDB
}
// AccountsUpdateSchemaTest adds some empty tables for tests to work with a "v6" store.
-func AccountsUpdateSchemaTest(ctx context.Context, tx *sql.Tx) (err error) {
- if err := accountsCreateOnlineAccountsTable(ctx, tx); err != nil {
+func AccountsUpdateSchemaTest(ctx context.Context, e db.Executable) (err error) {
+ if err := accountsCreateOnlineAccountsTable(ctx, e); err != nil {
return err
}
- if err := accountsCreateTxTailTable(ctx, tx); err != nil {
+ if err := accountsCreateTxTailTable(ctx, e); err != nil {
return err
}
- if err := accountsCreateOnlineRoundParamsTable(ctx, tx); err != nil {
+ if err := accountsCreateOnlineRoundParamsTable(ctx, e); err != nil {
return err
}
- if err := accountsCreateCatchpointFirstStageInfoTable(ctx, tx); err != nil {
+ if err := accountsCreateCatchpointFirstStageInfoTable(ctx, e); err != nil {
return err
}
// this line creates kvstore table, even if it is not required in accountDBVersion 6 -> 7
// or in later version where we need kvstore table, some tests will fail
- if err := accountsCreateBoxTable(ctx, tx); err != nil {
- return err
- }
- if err := createStateProofVerificationTable(ctx, tx); err != nil {
+ if err := accountsCreateBoxTable(ctx, e); err != nil {
return err
}
- return nil
+ return createStateProofVerificationTable(ctx, e)
}
diff --git a/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go b/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go
index 2c7a370f0..0f59a5ce5 100644
--- a/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go
+++ b/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go
@@ -18,7 +18,6 @@ package sqlitedriver
import (
"context"
- "database/sql"
"encoding/hex"
"fmt"
"os"
@@ -50,9 +49,9 @@ type trackerDBSchemaInitializer struct {
// RunMigrations initializes the accounts DB if needed and return current account round.
// as part of the initialization, it tests the current database schema version, and perform upgrade
// procedures to bring it up to the database schema supported by the binary.
-func RunMigrations(ctx context.Context, tx *sql.Tx, params trackerdb.Params, log logging.Logger, targetVersion int32) (mgr trackerdb.InitParams, err error) {
+func RunMigrations(ctx context.Context, e db.Executable, params trackerdb.Params, log logging.Logger, targetVersion int32) (mgr trackerdb.InitParams, err error) {
// check current database version.
- dbVersion, err := db.GetUserVersion(ctx, tx)
+ dbVersion, err := db.GetUserVersion(ctx, e)
if err != nil {
return trackerdb.InitParams{}, fmt.Errorf("trackerDBInitialize unable to read database schema version : %v", err)
}
@@ -78,61 +77,61 @@ func RunMigrations(ctx context.Context, tx *sql.Tx, params trackerdb.Params, log
// perform the initialization/upgrade
switch tu.version() {
case 0:
- err = tu.upgradeDatabaseSchema0(ctx, tx)
+ err = tu.upgradeDatabaseSchema0(ctx, e)
if err != nil {
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 0 : %v", err)
return
}
case 1:
- err = tu.upgradeDatabaseSchema1(ctx, tx)
+ err = tu.upgradeDatabaseSchema1(ctx, e)
if err != nil {
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 1 : %v", err)
return
}
case 2:
- err = tu.upgradeDatabaseSchema2(ctx, tx)
+ err = tu.upgradeDatabaseSchema2(ctx, e)
if err != nil {
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 2 : %v", err)
return
}
case 3:
- err = tu.upgradeDatabaseSchema3(ctx, tx)
+ err = tu.upgradeDatabaseSchema3(ctx, e)
if err != nil {
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 3 : %v", err)
return
}
case 4:
- err = tu.upgradeDatabaseSchema4(ctx, tx)
+ err = tu.upgradeDatabaseSchema4(ctx, e)
if err != nil {
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 4 : %v", err)
return
}
case 5:
- err = tu.upgradeDatabaseSchema5(ctx, tx)
+ err = tu.upgradeDatabaseSchema5(ctx, e)
if err != nil {
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 5 : %v", err)
return
}
case 6:
- err = tu.upgradeDatabaseSchema6(ctx, tx)
+ err = tu.upgradeDatabaseSchema6(ctx, e)
if err != nil {
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 6 : %v", err)
return
}
case 7:
- err = tu.upgradeDatabaseSchema7(ctx, tx)
+ err = tu.upgradeDatabaseSchema7(ctx, e)
if err != nil {
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 7 : %v", err)
return
}
case 8:
- err = tu.upgradeDatabaseSchema8(ctx, tx)
+ err = tu.upgradeDatabaseSchema8(ctx, e)
if err != nil {
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 8 : %v", err)
return
}
case 9:
- err = tu.upgradeDatabaseSchema9(ctx, tx)
+ err = tu.upgradeDatabaseSchema9(ctx, e)
if err != nil {
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 9 : %v", err)
return
@@ -147,10 +146,10 @@ func RunMigrations(ctx context.Context, tx *sql.Tx, params trackerdb.Params, log
return trackerdb.InitParams{SchemaVersion: tu.schemaVersion, VacuumOnStartup: tu.vacuumOnStartup}, nil
}
-func (tu *trackerDBSchemaInitializer) setVersion(ctx context.Context, tx *sql.Tx, version int32) (err error) {
+func (tu *trackerDBSchemaInitializer) setVersion(ctx context.Context, e db.Executable, version int32) (err error) {
oldVersion := tu.schemaVersion
tu.schemaVersion = version
- _, err = db.SetUserVersion(ctx, tx, tu.schemaVersion)
+ _, err = db.SetUserVersion(ctx, e, tu.schemaVersion)
if err != nil {
return fmt.Errorf("trackerDBInitialize unable to update database schema version from %d to %d: %v", oldVersion, version, err)
}
@@ -180,13 +179,13 @@ func (tu trackerDBSchemaInitializer) version() int32 {
// The accountbase would get initialized with the au.initAccounts
// The accounttotals would get initialized to align with the initialization account added to accountbase
// The acctrounds would get updated to indicate that the balance matches round 0
-func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema0(ctx context.Context, tx *sql.Tx) (err error) {
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema0(ctx context.Context, e db.Executable) (err error) {
tu.log.Infof("upgradeDatabaseSchema0 initializing schema")
- tu.newDatabase, err = accountsInit(tx, tu.InitAccounts, config.Consensus[tu.InitProto])
+ tu.newDatabase, err = accountsInit(e, tu.InitAccounts, config.Consensus[tu.InitProto])
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema0 unable to initialize schema : %v", err)
}
- return tu.setVersion(ctx, tx, 1)
+ return tu.setVersion(ctx, e, 1)
}
// upgradeDatabaseSchema1 upgrades the database schema from version 1 to version 2
@@ -204,7 +203,7 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema0(ctx context.Context
//
// This upgrade doesn't change any of the actual database schema ( i.e. tables, indexes ) but rather just performing
// a functional update to it's content.
-func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema1(ctx context.Context, tx *sql.Tx) (err error) {
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema1(ctx context.Context, e db.Executable) (err error) {
var modifiedAccounts uint
if tu.newDatabase {
goto schemaUpdateComplete
@@ -212,14 +211,14 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema1(ctx context.Context
// update accounts encoding.
tu.log.Infof("upgradeDatabaseSchema1 verifying accounts data encoding")
- modifiedAccounts, err = reencodeAccounts(ctx, tx)
+ modifiedAccounts, err = reencodeAccounts(ctx, e)
if err != nil {
return err
}
if modifiedAccounts > 0 {
- crw := NewCatchpointSQLReaderWriter(tx)
- arw := NewAccountsSQLReaderWriter(tx)
+ crw := NewCatchpointSQLReaderWriter(e)
+ arw := NewAccountsSQLReaderWriter(e)
tu.log.Infof("upgradeDatabaseSchema1 reencoded %d accounts", modifiedAccounts)
@@ -249,7 +248,7 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema1(ctx context.Context
}
schemaUpdateComplete:
- return tu.setVersion(ctx, tx, 2)
+ return tu.setVersion(ctx, e, 2)
}
// upgradeDatabaseSchema2 upgrades the database schema from version 2 to version 3
@@ -257,30 +256,30 @@ schemaUpdateComplete:
// This upgrade only enables the database vacuuming which will take place once the upgrade process is complete.
// If the user has already specified the OptimizeAccountsDatabaseOnStartup flag in the configuration file, this
// step becomes a no-op.
-func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema2(ctx context.Context, tx *sql.Tx) (err error) {
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema2(ctx context.Context, e db.Executable) (err error) {
if !tu.newDatabase {
tu.vacuumOnStartup = true
}
// update version
- return tu.setVersion(ctx, tx, 3)
+ return tu.setVersion(ctx, e, 3)
}
// upgradeDatabaseSchema3 upgrades the database schema from version 3 to version 4,
// adding the normalizedonlinebalance column to the accountbase table.
-func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema3(ctx context.Context, tx *sql.Tx) (err error) {
- err = accountsAddNormalizedBalance(tx, config.Consensus[tu.InitProto])
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema3(ctx context.Context, e db.Executable) (err error) {
+ err = accountsAddNormalizedBalance(e, config.Consensus[tu.InitProto])
if err != nil {
return err
}
// update version
- return tu.setVersion(ctx, tx, 4)
+ return tu.setVersion(ctx, e, 4)
}
// upgradeDatabaseSchema4 does not change the schema but migrates data:
// remove empty AccountData entries from accountbase table
-func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema4(ctx context.Context, tx *sql.Tx) (err error) {
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema4(ctx context.Context, e db.Executable) (err error) {
var numDeleted int64
var addresses []basics.Address
@@ -288,13 +287,13 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema4(ctx context.Context
goto done
}
- numDeleted, addresses, err = removeEmptyAccountData(tx, tu.CatchpointEnabled)
+ numDeleted, addresses, err = removeEmptyAccountData(e, tu.CatchpointEnabled)
if err != nil {
return err
}
if tu.CatchpointEnabled && len(addresses) > 0 {
- mc, err := MakeMerkleCommitter(tx, false)
+ mc, err := MakeMerkleCommitter(e, false)
if err != nil {
// at this point record deleted and DB is pruned for account data
// if hash deletion fails just log it and do not abort startup
@@ -310,9 +309,9 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema4(ctx context.Context
var totalHashesDeleted int
for _, addr := range addresses {
hash := trackerdb.AccountHashBuilder(addr, basics.AccountData{}, []byte{0x80})
- deleted, err := trie.Delete(hash)
- if err != nil {
- tu.log.Errorf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v: %v", hex.EncodeToString(hash), addr, err)
+ deleted, delErr := trie.Delete(hash)
+ if delErr != nil {
+ tu.log.Errorf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v: %v", hex.EncodeToString(hash), addr, delErr)
} else {
if !deleted {
tu.log.Warnf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(hash), addr)
@@ -332,15 +331,15 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema4(ctx context.Context
done:
tu.log.Infof("upgradeDatabaseSchema4: deleted %d rows", numDeleted)
- return tu.setVersion(ctx, tx, 5)
+ return tu.setVersion(ctx, e, 5)
}
// upgradeDatabaseSchema5 upgrades the database schema from version 5 to version 6,
// adding the resources table and clearing empty catchpoint directories.
-func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema5(ctx context.Context, tx *sql.Tx) (err error) {
- arw := NewAccountsSQLReaderWriter(tx)
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema5(ctx context.Context, e db.Executable) (err error) {
+ arw := NewAccountsSQLReaderWriter(e)
- err = accountsCreateResourceTable(ctx, tx)
+ err = accountsCreateResourceTable(ctx, e)
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema5 unable to create resources table : %v", err)
}
@@ -360,7 +359,7 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema5(ctx context.Context
tu.log.Infof("upgradeDatabaseSchema5 upgraded %d out of %d accounts [ %3.1f%% ]", processed, total, float64(processed)*100.0/float64(total))
}
- err = performResourceTableMigration(ctx, tx, migrationProcessLog)
+ err = performResourceTableMigration(ctx, e, migrationProcessLog)
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema5 unable to complete data migration : %v", err)
}
@@ -372,11 +371,11 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema5(ctx context.Context
}
// update version
- return tu.setVersion(ctx, tx, 6)
+ return tu.setVersion(ctx, e, 6)
}
-func (tu *trackerDBSchemaInitializer) deleteUnfinishedCatchpoint(ctx context.Context, tx *sql.Tx) error {
- cts := NewCatchpointSQLReaderWriter(tx)
+func (tu *trackerDBSchemaInitializer) deleteUnfinishedCatchpoint(ctx context.Context, e db.Executable) error {
+ cts := NewCatchpointSQLReaderWriter(e)
// Delete an unfinished catchpoint if there is one.
round, err := cts.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateWritingCatchpoint)
if err != nil {
@@ -400,18 +399,18 @@ func (tu *trackerDBSchemaInitializer) deleteUnfinishedCatchpoint(ctx context.Con
// upgradeDatabaseSchema6 upgrades the database schema from version 6 to version 7,
// adding a new onlineaccounts table
// TODO: onlineaccounts: upgrade as needed after switching to the final table version
-func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema6(ctx context.Context, tx *sql.Tx) (err error) {
- err = accountsCreateOnlineAccountsTable(ctx, tx)
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema6(ctx context.Context, e db.Executable) (err error) {
+ err = accountsCreateOnlineAccountsTable(ctx, e)
if err != nil {
return err
}
- err = accountsCreateTxTailTable(ctx, tx)
+ err = accountsCreateTxTailTable(ctx, e)
if err != nil {
return err
}
- err = accountsCreateOnlineRoundParamsTable(ctx, tx)
+ err = accountsCreateOnlineRoundParamsTable(ctx, e)
if err != nil {
return err
}
@@ -426,54 +425,54 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema6(ctx context.Context
lastProgressInfoMsg = time.Now()
tu.log.Infof("upgradeDatabaseSchema6 upgraded %d out of %d accounts [ %3.1f%% ]", processed, total, float64(processed)*100.0/float64(total))
}
- err = performOnlineAccountsTableMigration(ctx, tx, migrationProcessLog, tu.log)
+ err = performOnlineAccountsTableMigration(ctx, e, migrationProcessLog, tu.log)
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema6 unable to complete online account data migration : %w", err)
}
if !tu.newDatabase {
- err = performTxTailTableMigration(ctx, tx, tu.BlockDb.Rdb)
+ err = performTxTailTableMigration(ctx, e, tu.BlockDb.Rdb)
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema6 unable to complete transaction tail data migration : %w", err)
}
}
- err = performOnlineRoundParamsTailMigration(ctx, tx, tu.BlockDb.Rdb, tu.newDatabase, tu.InitProto)
+ err = performOnlineRoundParamsTailMigration(ctx, e, tu.BlockDb.Rdb, tu.newDatabase, tu.InitProto)
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema6 unable to complete online round params data migration : %w", err)
}
- err = tu.deleteUnfinishedCatchpoint(ctx, tx)
+ err = tu.deleteUnfinishedCatchpoint(ctx, e)
if err != nil {
return err
}
- err = accountsCreateCatchpointFirstStageInfoTable(ctx, tx)
+ err = accountsCreateCatchpointFirstStageInfoTable(ctx, e)
if err != nil {
return err
}
- err = accountsCreateUnfinishedCatchpointsTable(ctx, tx)
+ err = accountsCreateUnfinishedCatchpointsTable(ctx, e)
if err != nil {
return err
}
// update version
- return tu.setVersion(ctx, tx, 7)
+ return tu.setVersion(ctx, e, 7)
}
// upgradeDatabaseSchema7 upgrades the database schema from version 7 to version 8.
// adding the kvstore table for box feature support.
-func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema7(ctx context.Context, tx *sql.Tx) (err error) {
- err = accountsCreateBoxTable(ctx, tx)
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema7(ctx context.Context, e db.Executable) (err error) {
+ err = accountsCreateBoxTable(ctx, e)
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema7 unable to create kvstore through createTables : %v", err)
}
- return tu.setVersion(ctx, tx, 8)
+ return tu.setVersion(ctx, e, 8)
}
// upgradeDatabaseSchema8 upgrades the database schema from version 8 to version 9,
// forcing a rebuild of the accounthashes table on betanet nodes. Otherwise it has no effect.
-func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema8(ctx context.Context, tx *sql.Tx) (err error) {
- arw := NewAccountsSQLReaderWriter(tx)
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema8(ctx context.Context, e db.Executable) (err error) {
+ arw := NewAccountsSQLReaderWriter(e)
betanetGenesisHash, _ := crypto.DigestFromString("TBMBVTC7W24RJNNUZCF7LWZD2NMESGZEQSMPG5XQD7JY4O7JKVWQ")
if tu.GenesisHash == betanetGenesisHash && !tu.FromCatchpoint {
// reset hash round to 0, forcing catchpointTracker.initializeHashes to rebuild accounthashes
@@ -482,30 +481,30 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema8(ctx context.Context
return fmt.Errorf("upgradeDatabaseSchema8 unable to reset acctrounds table 'hashbase' round : %v", err)
}
}
- return tu.setVersion(ctx, tx, 9)
+ return tu.setVersion(ctx, e, 9)
}
// upgradeDatabaseSchema9 upgrades the database schema from version 9 to version 10,
// adding a new stateproofverification table,
// scrubbing out all nil values from kvstore table and replace with empty byte slice.
-func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema9(ctx context.Context, tx *sql.Tx) (err error) {
- err = createStateProofVerificationTable(ctx, tx)
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema9(ctx context.Context, e db.Executable) (err error) {
+ err = createStateProofVerificationTable(ctx, e)
if err != nil {
return err
}
- err = performKVStoreNullBlobConversion(ctx, tx)
+ err = performKVStoreNullBlobConversion(ctx, e)
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema9 unable to replace kvstore nil entries with empty byte slices : %v", err)
}
- err = convertOnlineRoundParamsTail(ctx, tx)
+ err = convertOnlineRoundParamsTail(ctx, e)
if err != nil {
return fmt.Errorf("upgradeDatabaseSchema10 unable to convert onlineroundparamstail: %v", err)
}
// update version
- return tu.setVersion(ctx, tx, 10)
+ return tu.setVersion(ctx, e, 10)
}
func removeEmptyDirsOnSchemaUpgrade(dbDirectory string) (err error) {
diff --git a/ledger/store/trackerdb/store.go b/ledger/store/trackerdb/store.go
index 17e0e720a..4140f5be1 100644
--- a/ledger/store/trackerdb/store.go
+++ b/ledger/store/trackerdb/store.go
@@ -24,39 +24,125 @@ import (
"github.com/algorand/go-algorand/util/db"
)
-// BatchScope is the write scope to the store.
-type BatchScope interface {
- MakeCatchpointWriter() (CatchpointWriter, error)
- MakeAccountsWriter() (AccountsWriterExt, error)
- MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error)
- ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error)
- Testing() TestBatchScope
- MakeSpVerificationCtxWriter() SpVerificationCtxWriter
+// Store is the interface for the tracker db.
+type Store interface {
+ ReaderWriter
+ // settings
+ SetSynchronousMode(ctx context.Context, mode db.SynchronousMode, fullfsync bool) (err error)
+ IsSharedCacheConnection() bool
+ // batch support
+ Batch(fn BatchFn) (err error)
+ BatchContext(ctx context.Context, fn BatchFn) (err error)
+ BeginBatch(ctx context.Context) (Batch, error)
+ // snapshot support
+ Snapshot(fn SnapshotFn) (err error)
+ SnapshotContext(ctx context.Context, fn SnapshotFn) (err error)
+ BeginSnapshot(ctx context.Context) (Snapshot, error)
+ // transaction support
+ Transaction(fn TransactionFn) (err error)
+ TransactionContext(ctx context.Context, fn TransactionFn) (err error)
+ BeginTransaction(ctx context.Context) (Transaction, error)
+ // maintenance
+ Vacuum(ctx context.Context) (stats db.VacuumStats, err error)
+ // testing
+ ResetToV6Test(ctx context.Context) error
+ // cleanup
+ Close()
}
-// SnapshotScope is the read scope to the store.
-type SnapshotScope interface {
+// Reader is the interface for the trackerdb read operations.
+type Reader interface {
MakeAccountsReader() (AccountsReaderExt, error)
- MakeCatchpointReader() (CatchpointReader, error)
+ MakeAccountsOptimizedReader() (AccountsReader, error)
+ MakeOnlineAccountsOptimizedReader() (OnlineAccountsReader, error)
+ MakeSpVerificationCtxReader() SpVerificationCtxReader
+ // catchpoint
+ // Note: BuildMerkleTrie() needs this on the reader handle in sqlite to not get locked by write txns
MakeCatchpointPendingHashesIterator(hashCount int) CatchpointPendingHashesIter
+ // Note: Catchpoint tracker needs this on the reader handle in sqlite to not get locked by write txns
+ MakeCatchpointReader() (CatchpointReader, error)
+ MakeEncodedAccoutsBatchIter() EncodedAccountsBatchIter
+ MakeKVsIter(ctx context.Context) (KVsIter, error)
+}
- MakeSpVerificationCtxReader() SpVerificationCtxReader
+// Writer is the interface for the trackerdb write operations.
+type Writer interface {
+ // trackerdb
+ MakeAccountsWriter() (AccountsWriterExt, error)
+ MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error)
+ MakeOnlineAccountsOptimizedWriter(hasAccounts bool) (OnlineAccountsWriter, error)
+ MakeSpVerificationCtxWriter() SpVerificationCtxWriter
+ // testing
+ Testing() WriterTestExt
}
-// TransactionScope is the read/write scope to the store.
-type TransactionScope interface {
+// Catchpoint is currently holding most of the methods related to catchpoint.
+//
+// TODO: we still need to do a refactoring pass on catchpoint
+//
+// there are two distinct sets of methods present:
+// - read/write ops for managing catchpoint data
+// - read/write ops on trackerdb to support building catchpoints
+// we should split these two sets of methods into two separate interfaces
+type Catchpoint interface {
+ // reader
+ MakeOrderedAccountsIter(accountCount int) OrderedAccountsIter
+ // writer
+ MakeCatchpointWriter() (CatchpointWriter, error)
+ // reader/writer
MakeCatchpointReaderWriter() (CatchpointReaderWriter, error)
- MakeAccountsReaderWriter() (AccountsReaderWriter, error)
- MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error)
- MakeOnlineAccountsOptimizedWriter(hasAccounts bool) (w OnlineAccountsWriter, err error)
MakeMerkleCommitter(staging bool) (MerkleCommitter, error)
- MakeOrderedAccountsIter(accountCount int) OrderedAccountsIter
- MakeKVsIter(ctx context.Context) (KVsIter, error)
- MakeEncodedAccoutsBatchIter() EncodedAccountsBatchIter
+}
+
+// ReaderWriter is the interface for the trackerdb read/write operations.
+//
+// Some of the operations available here might not be present in either the Reader or the Writer interfaces.
+// This is because some operations might require to be able to read and write at the same time.
+type ReaderWriter interface {
+ Reader
+ Writer
+ // init
RunMigrations(ctx context.Context, params Params, log logging.Logger, targetVersion int32) (mgr InitParams, err error)
+ // Note: at the moment, catchpoint methods are only accessible via reader/writer
+ Catchpoint
+}
+
+// BatchScope is an atomic write-only scope to the store.
+type BatchScope interface {
+ Writer
ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error)
- Testing() TestTransactionScope
- MakeSpVerificationCtxReaderWriter() SpVerificationCtxReaderWriter
+}
+
+// Batch is an atomic write-only accessor to the store.
+type Batch interface {
+ BatchScope
+ Commit() error
+ Close() error
+}
+
+// SnapshotScope is an atomic read-only scope to the store.
+type SnapshotScope interface {
+ Reader
+ ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error)
+}
+
+// Snapshot is an atomic read-only accessor to the store.
+type Snapshot interface {
+ SnapshotScope
+ Close() error
+}
+
+// TransactionScope is an atomic read/write scope to the store.
+type TransactionScope interface {
+ ReaderWriter
+ ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error)
+}
+
+// Transaction is an atomic read/write accessor to the store.
+type Transaction interface {
+ TransactionScope
+ Commit() error
+ Close() error
}
// BatchFn is the callback lambda used in `Batch`.
@@ -67,30 +153,3 @@ type SnapshotFn func(ctx context.Context, tx SnapshotScope) error
// TransactionFn is the callback lambda used in `Transaction`.
type TransactionFn func(ctx context.Context, tx TransactionScope) error
-
-// TrackerStore is the interface for the tracker db.
-type TrackerStore interface {
- SetLogger(log logging.Logger)
- SetSynchronousMode(ctx context.Context, mode db.SynchronousMode, fullfsync bool) (err error)
- IsSharedCacheConnection() bool
-
- Batch(fn BatchFn) (err error)
- BatchContext(ctx context.Context, fn BatchFn) (err error)
-
- Snapshot(fn SnapshotFn) (err error)
- SnapshotContext(ctx context.Context, fn SnapshotFn) (err error)
-
- Transaction(fn TransactionFn) (err error)
- TransactionContext(ctx context.Context, fn TransactionFn) (err error)
-
- MakeAccountsOptimizedReader() (AccountsReader, error)
- MakeOnlineAccountsOptimizedReader() (OnlineAccountsReader, error)
-
- MakeCatchpointReaderWriter() (CatchpointReaderWriter, error)
-
- Vacuum(ctx context.Context) (stats db.VacuumStats, err error)
- Close()
- CleanupTest(dbName string, inMemory bool)
-
- ResetToV6Test(ctx context.Context) error
-}
diff --git a/ledger/store/trackerdb/testinterface.go b/ledger/store/trackerdb/testinterface.go
index 961857e87..a406375a2 100644
--- a/ledger/store/trackerdb/testinterface.go
+++ b/ledger/store/trackerdb/testinterface.go
@@ -23,41 +23,26 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger/ledgercore"
- "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
// testinterface.go contains interface extensions specific to testing
// testing interfaces should be made accessible by calling the Testing() method
// on the related interface. Example:
-// testTx := tx.Testing()
+// testTx := tx.Testing()
// these can also be inlined:
-// tx.Testing.AccountsInitTest(...)
-
-// TestBatchScope is an interface to extend BatchScope with test-only methods
-type TestBatchScope interface {
- BatchScope
+// tx.Testing.AccountsInitTest(...)
+// WriterTestExt is an interface to extend Writer with test-only methods
+type WriterTestExt interface {
AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool)
+ AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error)
AccountsUpdateSchemaTest(ctx context.Context) (err error)
- RunMigrations(ctx context.Context, params Params, log logging.Logger, targetVersion int32) (mgr InitParams, err error)
ModifyAcctBaseTest() error
}
-// TestTransactionScope is an interface to extend TransactionScope with test-only methods
-type TestTransactionScope interface {
- TransactionScope
-
- MakeAccountsOptimizedReader() (AccountsReader, error)
- MakeOnlineAccountsOptimizedReader() (OnlineAccountsReader, error)
- AccountsInitTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto protocol.ConsensusVersion) (newDatabase bool)
- AccountsInitLightTest(tb testing.TB, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) (newDatabase bool, err error)
-}
-
-// TestAccountsReaderExt is an interface to extend AccountsReaderExt with test-only methods
-type TestAccountsReaderExt interface {
- AccountsReaderExt
-
+// AccountsReaderTestExt is an interface to extend AccountsReaderExt with test-only methods
+type AccountsReaderTestExt interface {
AccountsAllTest() (bals map[basics.Address]basics.AccountData, err error)
CheckCreatablesTest(t *testing.T, iteration int, expectedDbImage map[basics.CreatableIndex]ledgercore.ModifiedCreatable)
}
diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go
index 9fb995349..fd4f3b415 100644
--- a/ledger/testing/randomAccounts.go
+++ b/ledger/testing/randomAccounts.go
@@ -100,7 +100,7 @@ func RandomAssetParams() basics.AssetParams {
DefaultFrozen: crypto.RandUint64()%2 == 0,
}
if crypto.RandUint64()%5 != 0 {
- ap.UnitName = fmt.Sprintf("un%x", uint32(crypto.RandUint64()%0x7fffffff))
+ ap.UnitName = fmt.Sprintf("un%x", uint32(crypto.RandUint64()%0x7fffff))
}
if crypto.RandUint64()%5 != 0 {
ap.AssetName = fmt.Sprintf("an%x", uint32(crypto.RandUint64()%0x7fffffff))
diff --git a/ledger/tracker.go b/ledger/tracker.go
index 39ea9d4b1..ebed56d78 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -134,7 +134,7 @@ type ledgerTracker interface {
// ledgerForTracker defines the part of the ledger that a tracker can
// access. This is particularly useful for testing trackers in isolation.
type ledgerForTracker interface {
- trackerDB() trackerdb.TrackerStore
+ trackerDB() trackerdb.Store
blockDB() db.Pair
trackerLog() logging.Logger
trackerEvalVerified(bookkeeping.Block, eval.LedgerForEvaluator) (ledgercore.StateDelta, error)
@@ -174,7 +174,7 @@ type trackerRegistry struct {
// cached to avoid SQL queries.
dbRound basics.Round
- dbs trackerdb.TrackerStore
+ dbs trackerdb.Store
log logging.Logger
// the synchronous mode that would be used for the account database.
@@ -554,7 +554,7 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error {
start := time.Now()
ledgerCommitroundCount.Inc(nil)
err = tr.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
- arw, err := tx.MakeAccountsReaderWriter()
+ aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
}
@@ -566,7 +566,7 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error {
}
}
- return arw.UpdateAccountsRound(dbRound + basics.Round(offset))
+ return aw.UpdateAccountsRound(dbRound + basics.Round(offset))
})
ledgerCommitroundMicros.AddMicrosecondsSince(start, nil)
diff --git a/ledger/trackerdb.go b/ledger/trackerdb.go
index 8955a27c0..bf2307581 100644
--- a/ledger/trackerdb.go
+++ b/ledger/trackerdb.go
@@ -38,44 +38,57 @@ func trackerDBInitialize(l ledgerForTracker, catchpointEnabled bool, dbPathPrefi
return
}
- err = dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
- arw, err := tx.MakeAccountsReaderWriter()
- if err != nil {
- return err
- }
+ tp := trackerdb.Params{
+ InitAccounts: l.GenesisAccounts(),
+ InitProto: l.GenesisProtoVersion(),
+ GenesisHash: l.GenesisHash(),
+ FromCatchpoint: false,
+ CatchpointEnabled: catchpointEnabled,
+ DbPathPrefix: dbPathPrefix,
+ BlockDb: bdbs,
+ }
- tp := trackerdb.Params{
- InitAccounts: l.GenesisAccounts(),
- InitProto: l.GenesisProtoVersion(),
- GenesisHash: l.GenesisHash(),
- FromCatchpoint: false,
- CatchpointEnabled: catchpointEnabled,
- DbPathPrefix: dbPathPrefix,
- BlockDb: bdbs,
- }
- var err0 error
- mgr, err0 = tx.RunMigrations(ctx, tp, log, trackerdb.AccountDBVersion)
- if err0 != nil {
- return err0
- }
- lastBalancesRound, err := arw.AccountsRound()
- if err != nil {
- return err
- }
- // Check for blocks DB and tracker DB un-sync
- if lastBalancesRound > lastestBlockRound {
- log.Warnf("trackerDBInitialize: resetting accounts DB (on round %v, but blocks DB's latest is %v)", lastBalancesRound, lastestBlockRound)
- err0 = arw.AccountsReset(ctx)
- if err0 != nil {
- return err0
+ // run migrations
+ mgr, err = dbs.RunMigrations(context.Background(), tp, log, trackerdb.AccountDBVersion)
+ if err != nil {
+ return
+ }
+
+ // create reader for db
+ ar, err := dbs.MakeAccountsReader()
+ if err != nil {
+ return
+ }
+
+ // check current round
+ lastBalancesRound, err := ar.AccountsRound()
+ if err != nil {
+ return
+ }
+
+ // Check for blocks DB and tracker DB un-sync
+ if lastBalancesRound > lastestBlockRound {
+ log.Warnf("trackerDBInitialize: resetting accounts DB (on round %v, but blocks DB's latest is %v)", lastBalancesRound, lastestBlockRound)
+ err = dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
+ var aw trackerdb.AccountsWriterExt
+ aw, err = tx.MakeAccountsWriter()
+ if err != nil {
+ return err
+ }
+ err = aw.AccountsReset(ctx)
+ if err != nil {
+ return err
}
- mgr, err0 = tx.RunMigrations(ctx, tp, log, trackerdb.AccountDBVersion)
- if err0 != nil {
- return err0
+ mgr, err = tx.RunMigrations(ctx, tp, log, trackerdb.AccountDBVersion)
+ if err != nil {
+ return err
}
+ return nil
+ })
+ if err != nil {
+ return
}
- return nil
- })
+ }
return
}
diff --git a/ledger/txtail.go b/ledger/txtail.go
index a86a8af5b..7d71ea27e 100644
--- a/ledger/txtail.go
+++ b/ledger/txtail.go
@@ -273,7 +273,7 @@ func (t *txTail) prepareCommit(dcc *deferredCommitContext) (err error) {
}
func (t *txTail) commitRound(ctx context.Context, tx trackerdb.TransactionScope, dcc *deferredCommitContext) error {
- arw, err := tx.MakeAccountsReaderWriter()
+ aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
}
@@ -282,7 +282,7 @@ func (t *txTail) commitRound(ctx context.Context, tx trackerdb.TransactionScope,
// the formula is similar to the committedUpTo: rnd + 1 - retain size
forgetBeforeRound := (dcc.newBase() + 1).SubSaturate(basics.Round(dcc.txTailRetainSize))
baseRound := dcc.oldBase + 1
- if err := arw.TxtailNewRound(ctx, baseRound, dcc.txTailDeltas, forgetBeforeRound); err != nil {
+ if err := aw.TxtailNewRound(ctx, baseRound, dcc.txTailDeltas, forgetBeforeRound); err != nil {
return fmt.Errorf("txTail: unable to persist new round %d : %w", baseRound, err)
}
return nil
diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go
index a21af5120..ed75b591c 100644
--- a/ledger/txtail_test.go
+++ b/ledger/txtail_test.go
@@ -150,7 +150,7 @@ func (t *txTailTestLedger) initialize(ts *testing.T, protoVersion protocol.Conse
// create a corresponding blockdb.
inMemory := true
t.blockDBs, _ = storetesting.DbOpenTest(ts, inMemory)
- t.trackerDBs, _ = sqlitedriver.DbOpenTrackerTest(ts, inMemory)
+ t.trackerDBs, _ = sqlitedriver.OpenForTesting(ts, inMemory)
t.protoVersion = protoVersion
err := t.trackerDBs.Batch(func(transactionCtx context.Context, tx trackerdb.BatchScope) (err error) {
diff --git a/logging/logspec/agreement.go b/logging/logspec/agreement.go
index 45fe5e1b6..c5df1948b 100644
--- a/logging/logspec/agreement.go
+++ b/logging/logspec/agreement.go
@@ -23,6 +23,7 @@ import (
// AgreementType is an enum identifying a specific type of AgreementEvent
// TODO Maybe this should be called AgreementEventType, since these are not actually types of agreements
+//
//go:generate stringer -type=AgreementType
type AgreementType int
diff --git a/logging/logspec/ledger.go b/logging/logspec/ledger.go
index 0ed363340..541917209 100644
--- a/logging/logspec/ledger.go
+++ b/logging/logspec/ledger.go
@@ -23,6 +23,7 @@ import (
// LedgerType is an enum identifying a specific type of LedgerEvent
// TODO Maybe this should be called LedgerEventType, since these are not actually types of ledgers
+//
//go:generate stringer -type=LedgerType
type LedgerType int
diff --git a/logging/logspec/root.go b/logging/logspec/root.go
index 6dd08d1d5..1aa069130 100644
--- a/logging/logspec/root.go
+++ b/logging/logspec/root.go
@@ -25,6 +25,7 @@ import (
// Component is an enum identifying a specific type of Event
// TODO Maybe this should be called ComponentEventType (and change Event to ComponentEvent),
// since these are not actually types of components
+//
//go:generate stringer -type=Component
type Component int
diff --git a/logging/telemetryConfig.go b/logging/telemetryConfig.go
index eefd84e4a..6962bb97b 100644
--- a/logging/telemetryConfig.go
+++ b/logging/telemetryConfig.go
@@ -56,8 +56,9 @@ func TelemetryOverride(env string, telemetryConfig *TelemetryConfig) bool {
// createTelemetryConfig creates a new TelemetryConfig structure with a generated GUID and the appropriate Telemetry endpoint.
// Note: This should only be used/persisted when initially creating 'TelemetryConfigFilename'. Because the methods are called
-// from various tools and goal commands and affect the future default settings for telemetry, we need to inject
-// a "dev" branch check.
+//
+// from various tools and goal commands and affect the future default settings for telemetry, we need to inject
+// a "dev" branch check.
func createTelemetryConfig() TelemetryConfig {
enable := false
diff --git a/logging/telemetryhook.go b/logging/telemetryhook.go
index 8e036eacd..077f1d14d 100644
--- a/logging/telemetryhook.go
+++ b/logging/telemetryhook.go
@@ -274,7 +274,8 @@ func createTelemetryHook(cfg TelemetryConfig, history *logBuffer, hookFactory ho
}
// Note: This will be removed with the externalized telemetry project. Return whether or not the URI was successfully
-// updated.
+//
+// updated.
func (hook *asyncTelemetryHook) UpdateHookURI(uri string) (err error) {
updated := false
diff --git a/logging/telemetryspec/metric.go b/logging/telemetryspec/metric.go
index bdd504e2f..e2c43cf0a 100644
--- a/logging/telemetryspec/metric.go
+++ b/logging/telemetryspec/metric.go
@@ -202,7 +202,7 @@ func (m RoundTimingMetrics) Identifier() Metric {
return roundTimingMetricsIdentifier
}
-//-------------------------------------------------------
+// -------------------------------------------------------
// AccountsUpdate
const accountsUpdateMetricsIdentifier Metric = "AccountsUpdate"
diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go
index 865ad9c30..1a140e74d 100644
--- a/netdeploy/networkTemplate.go
+++ b/netdeploy/networkTemplate.go
@@ -141,9 +141,9 @@ func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir strin
return
}
- stdout, stderr, err := util.ExecAndCaptureOutput(importKeysCmd, "account", "importrootkey", "-w", string(libgoal.UnencryptedWalletName), "-d", nodeDir)
- if err != nil {
- return nil, nil, fmt.Errorf("goal account importrootkey failed: %w\nstdout: %s\nstderr: %s", err, stdout, stderr)
+ stdout, stderr, execErr := util.ExecAndCaptureOutput(importKeysCmd, "account", "importrootkey", "-w", string(libgoal.UnencryptedWalletName), "-d", nodeDir)
+ if execErr != nil {
+ return nil, nil, fmt.Errorf("goal account importrootkey failed: %w\nstdout: %s\nstderr: %s", execErr, stdout, stderr)
}
}
diff --git a/netdeploy/remote/bootstrappedNetwork.go b/netdeploy/remote/bootstrappedNetwork.go
index 855a2ea3b..bd1d3e744 100644
--- a/netdeploy/remote/bootstrappedNetwork.go
+++ b/netdeploy/remote/bootstrappedNetwork.go
@@ -21,7 +21,7 @@ import (
"os"
)
-//BootstrappedNetwork contains the specs for generating db files
+// BootstrappedNetwork contains the specs for generating db files
type BootstrappedNetwork struct {
NumRounds uint64 `json:"numRounds"`
RoundTransactionsCount uint64 `json:"roundTransactionsCount"`
@@ -30,6 +30,7 @@ type BootstrappedNetwork struct {
GeneratedApplicationCount uint64 `json:"generatedApplicationCount"`
SourceWalletName string `json:"sourceWalletName"`
BalanceRange []int64 `json:"acctBalanceRange"`
+ DeterministicKeys bool `json:"deterministicKeys"`
}
// LoadBootstrappedData loads a bootstrappedFile structure from a json file
diff --git a/netdeploy/remote/buildConfig.go b/netdeploy/remote/buildConfig.go
index bffb85f0a..b7e39d688 100644
--- a/netdeploy/remote/buildConfig.go
+++ b/netdeploy/remote/buildConfig.go
@@ -32,6 +32,7 @@ type BuildConfig struct {
APIEndpoint2 string
APIEndpoint3 string
APIEndpoint4 string
+ AdminAPIToken string
APIToken string
EnableTelemetry bool
TelemetryURI string
diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go
index 4bf395514..8c4b3eaee 100644
--- a/netdeploy/remote/deployedNetwork.go
+++ b/netdeploy/remote/deployedNetwork.go
@@ -92,6 +92,9 @@ type netState struct {
assetPerAcct int
appsPerAcct int
+ deterministicKeys bool
+ deterministicAccountCount uint64
+
genesisID string
genesisHash crypto.Digest
poolAddr basics.Address
@@ -153,6 +156,7 @@ func replaceTokens(original string, buildConfig BuildConfig) (expanded string, e
tokenPairs = append(tokenPairs, "{{APIEndpoint3}}", buildConfig.APIEndpoint3)
tokenPairs = append(tokenPairs, "{{APIEndpoint4}}", buildConfig.APIEndpoint4)
tokenPairs = append(tokenPairs, "{{APIToken}}", buildConfig.APIToken)
+ tokenPairs = append(tokenPairs, "{{AdminAPIToken}}", buildConfig.AdminAPIToken)
tokenPairs = append(tokenPairs, "{{EnableTelemetry}}", strconv.FormatBool(buildConfig.EnableTelemetry))
tokenPairs = append(tokenPairs, "{{TelemetryURI}}", buildConfig.TelemetryURI)
tokenPairs = append(tokenPairs, "{{MetricsURI}}", buildConfig.MetricsURI)
@@ -351,7 +355,7 @@ func (cfg DeployedNetwork) BuildNetworkFromTemplate(buildCfg BuildConfig, rootDi
return
}
-//GenerateDatabaseFiles generates database files according to the configurations
+// GenerateDatabaseFiles generates database files according to the configurations
func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, genesisFolder string) error {
accounts := make(map[basics.Address]basics.AccountData)
@@ -384,23 +388,24 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g
default:
}
- accounts[addr] = alloc.State
+ accounts[addr] = alloc.State.AccountData()
}
//initial state
log := logging.NewLogger()
bootstrappedNet := netState{
- nAssets: fileCfgs.GeneratedAssetsCount,
- nApplications: fileCfgs.GeneratedApplicationCount,
- txnState: protocol.PaymentTx,
- roundTxnCnt: fileCfgs.RoundTransactionsCount,
- round: basics.Round(0),
- genesisID: genesis.ID(),
- genesisHash: genesis.Hash(),
- poolAddr: poolAddr,
- sinkAddr: sinkAddr,
- log: log,
+ nAssets: fileCfgs.GeneratedAssetsCount,
+ nApplications: fileCfgs.GeneratedApplicationCount,
+ txnState: protocol.PaymentTx,
+ roundTxnCnt: fileCfgs.RoundTransactionsCount,
+ round: basics.Round(0),
+ genesisID: genesis.ID(),
+ genesisHash: genesis.Hash(),
+ poolAddr: poolAddr,
+ sinkAddr: sinkAddr,
+ log: log,
+ deterministicKeys: fileCfgs.DeterministicKeys,
}
var params config.ConsensusParams
@@ -422,6 +427,9 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g
rand.Seed(time.Now().UnixNano())
min := fileCfgs.BalanceRange[0]
max := fileCfgs.BalanceRange[1]
+ // TODO: Randomly assigning target balance in a range may cause tests to behave unpredictably,
+ // if the randomly selected balance is too low for proper testing.
+ // consider inserting a hardcoded balance sufficient for your tests.
bal := rand.Int63n(max-min) + min
bootstrappedNet.fundPerAccount = basics.MicroAlgos{Raw: uint64(bal)}
srcAcct := accounts[src]
@@ -455,6 +463,10 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g
for i := uint64(bootstrappedNet.round); i < fileCfgs.NumRounds; i++ {
bootstrappedNet.round++
blk, _ := createBlock(src, prev, fileCfgs.RoundTransactionsCount, &bootstrappedNet, params, log)
+ // don't allow the ledger to fall more than 10 rounds behind before adding more
+ for int(bootstrappedNet.round)-int(l.LatestTrackerCommitted()) > 10 {
+ time.Sleep(100 * time.Millisecond)
+ }
err = l.AddBlock(blk, agreement.Certificate{Round: bootstrappedNet.round})
if err != nil {
fmt.Printf("Error %v\n", err)
@@ -486,6 +498,15 @@ func getGenesisAlloc(name string, allocation []bookkeeping.GenesisAllocation) bo
return bookkeeping.GenesisAllocation{}
}
+// deterministicKeypair returns a key based on the provided index
+func deterministicKeypair(i uint64) *crypto.SignatureSecrets {
+ var seed crypto.Seed
+ binary.LittleEndian.PutUint64(seed[:], i)
+ s := crypto.GenerateSignatureSecrets(seed)
+ return s
+}
+
+// keypair returns a random key
func keypair() *crypto.SignatureSecrets {
var seed crypto.Seed
crypto.RandBytes(seed[:])
@@ -576,6 +597,10 @@ func generateAccounts(src basics.Address, roundTxnCnt uint64, prev bookkeeping.B
//create accounts
bootstrappedNet.round++
blk, _ := createBlock(src, prev, roundTxnCnt, bootstrappedNet, csParams, log)
+ // don't allow the ledger to fall more than 10 rounds behind before adding more
+ for int(bootstrappedNet.round)-int(l.LatestTrackerCommitted()) > 10 {
+ time.Sleep(100 * time.Millisecond)
+ }
err := l.AddBlock(blk, agreement.Certificate{Round: bootstrappedNet.round})
if err != nil {
fmt.Printf("Error %v\n", err)
@@ -659,7 +684,13 @@ func createSignedTx(src basics.Address, round basics.Round, params config.Consen
if !bootstrappedNet.accountsCreated {
for i := uint64(0); i < n; i++ {
- secretDst := keypair()
+ var secretDst *crypto.SignatureSecrets
+ if bootstrappedNet.deterministicKeys {
+ secretDst = deterministicKeypair(bootstrappedNet.deterministicAccountCount)
+ bootstrappedNet.deterministicAccountCount++
+ } else {
+ secretDst = keypair()
+ }
dst := basics.Address(secretDst.SignatureVerifier)
bootstrappedNet.accounts = append(bootstrappedNet.accounts, dst)
@@ -1064,6 +1095,9 @@ func computeRootStorage(nodeCount, relayCount int) int {
// 10 per node should be good for a week (add relayCount * 0 so param is used)
minGB := 20 + (nodeCount * 10) + (relayCount * 50)
return minGB
+ // TODO: this function appears to insufficiently provision EBS nodes in some cases
+ // if your nodes have insufficient storage, consider using a reasonable hardcoded value like
+ // return 256
}
func computeSSDStorage(nodeCount, relayCount int) int {
diff --git a/netdeploy/remote/nodeConfig.go b/netdeploy/remote/nodeConfig.go
index c5a9b1a6d..5a8b3ac43 100644
--- a/netdeploy/remote/nodeConfig.go
+++ b/netdeploy/remote/nodeConfig.go
@@ -23,6 +23,7 @@ type NodeConfig struct {
NetAddress string `json:",omitempty"`
APIEndpoint string `json:",omitempty"`
APIToken string `json:",omitempty"`
+ AdminAPIToken string `json:",omitempty"`
EnableTelemetry bool // Needs to also be configured host-wide (assign logging host name)
TelemetryURI string `json:",omitempty"` // Needs to be HostConfig
EnableMetrics bool // Needs to also be configured host-wide (register DNS entry)
diff --git a/netdeploy/remote/nodecfg/nodeDir.go b/netdeploy/remote/nodecfg/nodeDir.go
index 59f2eba50..8d17f01d5 100644
--- a/netdeploy/remote/nodecfg/nodeDir.go
+++ b/netdeploy/remote/nodecfg/nodeDir.go
@@ -40,18 +40,18 @@ type nodeDir struct {
}
// * Configure:
-// * IsRelay
-// * NetAddress
-// * APIEndpoint
-// * APIToken
-// * EnableTelemetry
-// * TelemetryURI
-// * EnableMetrics
-// * EnableService
-// * CronTabSchedule
-// * EnableBlockStats
-// * DashboardEndpoint
-// * DeadlockOverride
+// - IsRelay
+// - NetAddress
+// - APIEndpoint
+// - APIToken
+// - EnableTelemetry
+// - TelemetryURI
+// - EnableMetrics
+// - EnableService
+// - CronTabSchedule
+// - EnableBlockStats
+// - DashboardEndpoint
+// - DeadlockOverride
func (nd *nodeDir) configure() (err error) {
fmt.Fprintf(os.Stdout, "Configuring Node %s\n", nd.Name)
if err = nd.configureRelay(nd.IsRelay()); err != nil {
@@ -66,6 +66,10 @@ func (nd *nodeDir) configure() (err error) {
fmt.Fprintf(os.Stdout, "Error during configureAPIToken: %s\n", err)
return
}
+ if err = nd.configureAdminAPIToken(nd.AdminAPIToken); err != nil {
+ fmt.Fprintf(os.Stdout, "Error during configureAdminAPIToken: %s\n", err)
+ return
+ }
if err = nd.configureTelemetry(nd.EnableTelemetry); err != nil {
fmt.Fprintf(os.Stdout, "Error during configureTelemetry: %s\n", err)
return
@@ -144,8 +148,9 @@ func (nd *nodeDir) configureNetAddress() (err error) {
nd.config.NetAddress = nd.NetAddress
if nd.IsRelay() && nd.NetAddress[0] == ':' {
fmt.Fprintf(os.Stdout, " - adding to relay addresses\n")
- domainName := strings.Replace(nd.config.DNSBootstrapID, "<network>", string(nd.configurator.genesisData.Network), -1)
- nd.configurator.addRelaySrv(domainName, nd.NetAddress)
+ for _, bootstrapRecord := range nd.config.DNSBootstrapArray(nd.configurator.genesisData.Network) {
+ nd.configurator.addRelaySrv(bootstrapRecord.PrimarySRVBootstrap, nd.NetAddress)
+ }
}
err = nd.saveConfig()
return
@@ -180,6 +185,21 @@ func (nd *nodeDir) configureAPIToken(token string) (err error) {
return nd.saveConfig()
}
+func (nd *nodeDir) configureAdminAPIToken(token string) (err error) {
+ if token == "" {
+ return
+ }
+ if err = nd.ensureConfig(); err != nil {
+ return
+ }
+ fmt.Fprintf(os.Stdout, " - Assigning AdminAPIToken: %s\n", token)
+ err = os.WriteFile(filepath.Join(nd.dataDir, tokens.AlgodAdminTokenFilename), []byte(token), 0600)
+ if err != nil {
+ return err
+ }
+ return nd.saveConfig()
+}
+
func (nd *nodeDir) configureTelemetry(enable bool) (err error) {
cfg, created, cfgErr := logging.EnsureTelemetryConfigCreated(nil, "")
if cfgErr != nil {
@@ -309,7 +329,8 @@ func (nd *nodeDir) configureDNSBootstrap() (err error) {
}
if nd.config.DNSBootstrapID == config.GetDefaultLocal().DNSBootstrapID {
- nd.config.DNSBootstrapID = strings.Replace(nd.config.DNSBootstrapID, "algorand", "algodev", -1)
+ // Ensure using our testing network without fallback support
+ nd.config.DNSBootstrapID = "<network>.algodev.network"
err = nd.saveConfig()
}
return
diff --git a/network/limited_reader_slurper.go b/network/limited_reader_slurper.go
index 2bdbb756a..568b165e5 100644
--- a/network/limited_reader_slurper.go
+++ b/network/limited_reader_slurper.go
@@ -32,6 +32,12 @@ type LimitedReaderSlurper struct {
// remainedUnallocatedSpace is how much more memory we are allowed to allocate for this reader beyond the base allocation.
remainedUnallocatedSpace uint64
+ // currentMessageBytesRead is the size of the message we are currently reading.
+ currentMessageBytesRead uint64
+
+ // currentMessageMaxSize is the maximum number of bytes the current message type is allowed to have.
+ currentMessageMaxSize uint64
+
// the buffers array contain the memory buffers used to store the data. The first level array is preallocated
// dependening on the desired base allocation. The rest of the levels are dynamically allocated on demand.
buffers [][]byte
@@ -48,6 +54,8 @@ func MakeLimitedReaderSlurper(baseAllocation, maxAllocation uint64) *LimitedRead
lrs := &LimitedReaderSlurper{
remainedUnallocatedSpace: maxAllocation - baseAllocation,
lastBuffer: 0,
+ currentMessageBytesRead: 0,
+ currentMessageMaxSize: 0,
buffers: make([][]byte, 1+(maxAllocation-baseAllocation+allocationStep-1)/allocationStep),
}
lrs.buffers[0] = make([]byte, 0, baseAllocation)
@@ -91,6 +99,10 @@ func (s *LimitedReaderSlurper) Read(reader io.Reader) error {
entireBuffer := readBuffer[:cap(readBuffer)]
// read the data into the unused area of the read buffer.
n, err := reader.Read(entireBuffer[len(readBuffer):])
+ s.currentMessageBytesRead += uint64(n)
+ if s.currentMessageMaxSize > 0 && s.currentMessageBytesRead > s.currentMessageMaxSize {
+ return ErrIncomingMsgTooLarge
+ }
if err != nil {
if err == io.EOF {
s.buffers[s.lastBuffer] = readBuffer[:len(readBuffer)+n]
@@ -110,12 +122,14 @@ func (s *LimitedReaderSlurper) Size() (size uint64) {
return
}
-// Reset clears the buffered data
-func (s *LimitedReaderSlurper) Reset() {
+// Reset clears the buffered data and sets a limit for the upcoming message
+func (s *LimitedReaderSlurper) Reset(n uint64) {
for i := 1; i <= s.lastBuffer; i++ {
s.remainedUnallocatedSpace += uint64(cap(s.buffers[i]))
s.buffers[i] = nil
}
+ s.currentMessageMaxSize = n
+ s.currentMessageBytesRead = 0
s.buffers[0] = s.buffers[0][:0]
s.lastBuffer = 0
}
diff --git a/network/limited_reader_slurper_test.go b/network/limited_reader_slurper_test.go
index 92509d95f..bcd6eddec 100644
--- a/network/limited_reader_slurper_test.go
+++ b/network/limited_reader_slurper_test.go
@@ -114,7 +114,7 @@ func benchmarkLimitedReaderSlurper(b *testing.B, arraySize uint64) {
err := reader.Read(buffers[i])
require.NoError(b, err)
reader.Bytes()
- reader.Reset()
+ reader.Reset(0)
}
}
func BenchmarkLimitedReaderSlurper(b *testing.B) {
@@ -154,3 +154,46 @@ func TestLimitedReaderSlurperBufferAllocations(t *testing.T) {
}
}
}
+
+func TestLimitedReaderSlurperPerMessageMaxSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ type randMode int
+
+ const (
+ modeLessThan randMode = iota
+ modeEqual
+ modeGreaterThan
+ )
+
+ maxMessageSize := 1024
+ slurper := MakeLimitedReaderSlurper(512, uint64(maxMessageSize))
+ for i := 0; i < 30; i++ {
+ var b []byte
+ randPick := randMode(crypto.RandUint64() % uint64(3))
+ currentSize := crypto.RandUint64()%uint64(maxMessageSize) + 1
+ slurper.Reset(currentSize)
+ if randPick == modeLessThan {
+ dataSize := crypto.RandUint64() % currentSize
+ b = make([]byte, dataSize)
+ crypto.RandBytes(b[:])
+ err := slurper.Read(bytes.NewBuffer(b))
+ require.NoError(t, err)
+ require.Len(t, slurper.Bytes(), int(dataSize))
+ } else if randPick == modeEqual {
+ dataSize := currentSize
+ b = make([]byte, dataSize)
+ crypto.RandBytes(b[:])
+ err := slurper.Read(bytes.NewBuffer(b))
+ require.NoError(t, err)
+ require.Len(t, slurper.Bytes(), int(currentSize))
+ } else if randPick == modeGreaterThan {
+ dataSize := currentSize + 1
+ b = make([]byte, dataSize)
+ crypto.RandBytes(b[:])
+ err := slurper.Read(bytes.NewBuffer(b))
+ require.Error(t, err)
+ }
+ }
+}
diff --git a/network/msgOfInterest.go b/network/msgOfInterest.go
index c7a3faa12..eb08155ba 100644
--- a/network/msgOfInterest.go
+++ b/network/msgOfInterest.go
@@ -26,6 +26,7 @@ import (
var errUnableUnmarshallMessage = errors.New("unmarshalMessageOfInterest: could not unmarshall message")
var errInvalidMessageOfInterest = errors.New("unmarshalMessageOfInterest: message missing the tags key")
var errInvalidMessageOfInterestLength = errors.New("unmarshalMessageOfInterest: message length is too long")
+var errInvalidMessageOfInterestInvalidTag = errors.New("unmarshalMessageOfInterest: invalid tag")
const maxMessageOfInterestTags = 1024
const topicsEncodingSeparator = ","
@@ -46,6 +47,12 @@ func unmarshallMessageOfInterest(data []byte) (map[protocol.Tag]bool, error) {
// convert the tags into a tags map.
msgTagsMap := make(map[protocol.Tag]bool, len(tags))
for _, tag := range strings.Split(string(tags), topicsEncodingSeparator) {
+ if len(tag) != protocol.TagLength {
+ return nil, errInvalidMessageOfInterestInvalidTag
+ }
+ if _, ok := protocol.TagMap[protocol.Tag(tag)]; !ok {
+ return nil, errInvalidMessageOfInterestInvalidTag
+ }
msgTagsMap[protocol.Tag(tag)] = true
}
return msgTagsMap, nil
@@ -80,3 +87,13 @@ func MarshallMessageOfInterestMap(tagmap map[protocol.Tag]bool) []byte {
topics := Topics{Topic{key: "tags", data: []byte(tags)}}
return topics.MarshallTopics()
}
+
+// MessageOfInterestMaxSize returns the maximum size of a MI message sent over the network
+// by encoding all of the tags currently in use.
+func MessageOfInterestMaxSize() int {
+ allTags := make(map[protocol.Tag]bool, len(protocol.TagList))
+ for _, tag := range protocol.TagList {
+ allTags[tag] = true
+ }
+ return len(MarshallMessageOfInterest(protocol.TagList))
+}
diff --git a/network/msgp_gen.go b/network/msgp_gen.go
index 70b03ccfc..8c32ee026 100644
--- a/network/msgp_gen.go
+++ b/network/msgp_gen.go
@@ -4,6 +4,8 @@ package network
import (
"github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/crypto"
)
// The following msgp objects are implemented in this file:
@@ -14,6 +16,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> DisconnectReasonMaxSize()
//
// identityChallenge
// |-----> (*) MarshalMsg
@@ -22,6 +25,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> IdentityChallengeMaxSize()
//
// identityChallengeResponse
// |-----> (*) MarshalMsg
@@ -30,6 +34,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> IdentityChallengeResponseMaxSize()
//
// identityChallengeResponseSigned
// |-----> (*) MarshalMsg
@@ -38,6 +43,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> IdentityChallengeResponseSignedMaxSize()
//
// identityChallengeSigned
// |-----> (*) MarshalMsg
@@ -46,6 +52,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> IdentityChallengeSignedMaxSize()
//
// identityChallengeValue
// |-----> (*) MarshalMsg
@@ -54,6 +61,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> IdentityChallengeValueMaxSize()
//
// identityVerificationMessage
// |-----> (*) MarshalMsg
@@ -62,6 +70,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> IdentityVerificationMessageMaxSize()
//
// identityVerificationMessageSigned
// |-----> (*) MarshalMsg
@@ -70,6 +79,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> IdentityVerificationMessageSignedMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -118,6 +128,12 @@ func (z disconnectReason) MsgIsZero() bool {
return z == ""
}
+// MaxSize returns a maximum valid message size for this message type
+func DisconnectReasonMaxSize() (s int) {
+ panic("Unable to determine max size: String type string(z) is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *identityChallenge) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -290,6 +306,15 @@ func (z *identityChallenge) MsgIsZero() bool {
return ((*z).Key.MsgIsZero()) && ((*z).Challenge == (identityChallengeValue{})) && (len((*z).PublicAddress) == 0)
}
+// MaxSize returns a maximum valid message size for this message type
+func IdentityChallengeMaxSize() (s int) {
+ s = 1 + 3 + crypto.PublicKeyMaxSize() + 2
+ // Calculating size of array: z.Challenge
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 2 + msgp.BytesPrefixSize + maxAddressLen
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *identityChallengeResponse) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -442,6 +467,17 @@ func (z *identityChallengeResponse) MsgIsZero() bool {
return ((*z).Key.MsgIsZero()) && ((*z).Challenge == (identityChallengeValue{})) && ((*z).ResponseChallenge == (identityChallengeValue{}))
}
+// MaxSize returns a maximum valid message size for this message type
+func IdentityChallengeResponseMaxSize() (s int) {
+ s = 1 + 3 + crypto.PublicKeyMaxSize() + 2
+ // Calculating size of array: z.Challenge
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 3
+ // Calculating size of array: z.ResponseChallenge
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *identityChallengeResponseSigned) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -571,6 +607,12 @@ func (z *identityChallengeResponseSigned) MsgIsZero() bool {
return ((*z).Msg.MsgIsZero()) && ((*z).Signature.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func IdentityChallengeResponseSignedMaxSize() (s int) {
+ s = 1 + 4 + IdentityChallengeResponseMaxSize() + 4 + crypto.SignatureMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *identityChallengeSigned) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -700,6 +742,12 @@ func (z *identityChallengeSigned) MsgIsZero() bool {
return ((*z).Msg.MsgIsZero()) && ((*z).Signature.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func IdentityChallengeSignedMaxSize() (s int) {
+ s = 1 + 3 + IdentityChallengeMaxSize() + 4 + crypto.SignatureMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *identityChallengeValue) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -739,6 +787,13 @@ func (z *identityChallengeValue) MsgIsZero() bool {
return (*z) == (identityChallengeValue{})
}
+// MaxSize returns a maximum valid message size for this message type
+func IdentityChallengeValueMaxSize() (s int) {
+ // Calculating size of array: z
+ s = msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *identityVerificationMessage) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -845,6 +900,14 @@ func (z *identityVerificationMessage) MsgIsZero() bool {
return ((*z).ResponseChallenge == (identityChallengeValue{}))
}
+// MaxSize returns a maximum valid message size for this message type
+func IdentityVerificationMessageMaxSize() (s int) {
+ s = 1 + 3
+ // Calculating size of array: z.ResponseChallenge
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *identityVerificationMessageSigned) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -1086,3 +1149,12 @@ func (z *identityVerificationMessageSigned) Msgsize() (s int) {
func (z *identityVerificationMessageSigned) MsgIsZero() bool {
return ((*z).Msg.ResponseChallenge == (identityChallengeValue{})) && ((*z).Signature.MsgIsZero())
}
+
+// MaxSize returns a maximum valid message size for this message type
+func IdentityVerificationMessageSignedMaxSize() (s int) {
+ s = 1 + 4 + 1 + 3
+ // Calculating size of array: z.Msg.ResponseChallenge
+ s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize))
+ s += 4 + crypto.SignatureMaxSize()
+ return
+}
diff --git a/network/phonebook.go b/network/phonebook.go
index ac5914a29..ad07a2b5f 100644
--- a/network/phonebook.go
+++ b/network/phonebook.go
@@ -22,6 +22,7 @@ import (
"time"
"github.com/algorand/go-deadlock"
+ "golang.org/x/exp/slices"
)
// when using GetAddresses with getAllAddresses, all the addresses will be retrieved, regardless
@@ -287,8 +288,7 @@ func shuffleStrings(set []string) {
func shuffleSelect(set []string, n int) []string {
if n >= len(set) || n == getAllAddresses {
// return shuffled copy of everything
- out := make([]string, len(set))
- copy(out, set)
+ out := slices.Clone(set)
shuffleStrings(out)
return out
}
diff --git a/network/topics.go b/network/topics.go
index ded264a0c..5cb037073 100644
--- a/network/topics.go
+++ b/network/topics.go
@@ -112,7 +112,7 @@ func UnmarshallTopics(buffer []byte) (ts Topics, err error) {
// read the data length
dataLen, nr := binary.Uvarint(buffer[idx:])
- if nr <= 0 || dataLen > maxMessageLength {
+ if nr <= 0 || dataLen > MaxMessageLength {
return nil, fmt.Errorf("UnmarshallTopics: could not read the data length")
}
idx += nr
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 919655352..56314f14b 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -163,6 +163,8 @@ const (
PeersConnectedIn PeerOption = iota
// PeersPhonebookRelays specifies all relays in the phonebook
PeersPhonebookRelays PeerOption = iota
+ // PeersPhonebookArchivalNodes specifies all archival nodes (relay or p2p)
+ PeersPhonebookArchivalNodes PeerOption = iota
// PeersPhonebookArchivers specifies all archivers in the phonebook
PeersPhonebookArchivers PeerOption = iota
)
@@ -269,6 +271,10 @@ type OutgoingMessage struct {
Payload []byte
Topics Topics
reason disconnectReason // used when Action == Disconnect
+
+ // OnRelease is a function called when outgoing message, resulting from this incoming message, is released
+ // either by being sent or discarded.
+ OnRelease func()
}
// ForwardingPolicy is an enum indicating to whom we should send a message
@@ -465,6 +471,9 @@ type WebsocketNetwork struct {
// protocolVersion is an actual version announced as ProtocolVersionHeader
protocolVersion string
+
+ // resolveSRVRecords is a function that resolves SRV records for a given service, protocol and name
+ resolveSRVRecords func(service string, protocol string, name string, fallbackDNSResolverAddress string, secure bool) (addrs []string, err error)
}
const (
@@ -685,6 +694,13 @@ func (wn *WebsocketNetwork) GetPeers(options ...PeerOption) []Peer {
peerCore := makePeerCore(wn, addr, wn.GetRoundTripper(), "" /*origin address*/)
outPeers = append(outPeers, &peerCore)
}
+ case PeersPhonebookArchivalNodes:
+ var addrs []string
+ addrs = wn.phonebook.GetAddresses(1000, PhoneBookEntryRelayRole)
+ for _, addr := range addrs {
+ peerCore := makePeerCore(wn, addr, wn.GetRoundTripper(), "" /*origin address*/)
+ outPeers = append(outPeers, &peerCore)
+ }
case PeersPhonebookArchivers:
// return copy of phonebook, which probably also contains peers we're connected to, but if it doesn't maybe we shouldn't be making new connections to those peers (because they disappeared from the directory)
var addrs []string
@@ -1308,7 +1324,7 @@ func (wn *WebsocketNetwork) messageHandlerThread(peersConnectivityCheckCh <-chan
wn.log.Warnf("WebsocketNetwork.messageHandlerThread: WebsocketNetwork.Broadcast returned unexpected error %v", err)
}
case Respond:
- err := msg.Sender.(*wsPeer).Respond(wn.ctx, msg, outmsg.Topics)
+ err := msg.Sender.(*wsPeer).Respond(wn.ctx, msg, outmsg)
if err != nil && err != wn.ctx.Err() {
wn.log.Warnf("WebsocketNetwork.messageHandlerThread: wsPeer.Respond returned unexpected error %v", err)
}
@@ -1712,20 +1728,7 @@ func (wn *WebsocketNetwork) meshThread() {
wn.DisconnectPeers()
}
- // TODO: only do DNS fetch every N seconds? Honor DNS TTL? Trust DNS library we're using to handle caching and TTL?
- dnsBootstrapArray := wn.config.DNSBootstrapArray(wn.NetworkID)
- for _, dnsBootstrap := range dnsBootstrapArray {
- relayAddrs, archiveAddrs := wn.getDNSAddrs(dnsBootstrap)
- if len(relayAddrs) > 0 {
- wn.log.Debugf("got %d relay dns addrs, %#v", len(relayAddrs), relayAddrs[:imin(5, len(relayAddrs))])
- wn.phonebook.ReplacePeerList(relayAddrs, dnsBootstrap, PhoneBookEntryRelayRole)
- } else {
- wn.log.Infof("got no relay DNS addrs for network %s", wn.NetworkID)
- }
- if len(archiveAddrs) > 0 {
- wn.phonebook.ReplacePeerList(archiveAddrs, dnsBootstrap, PhoneBookEntryArchiverRole)
- }
- }
+ wn.refreshRelayArchivePhonebookAddresses()
// as long as the call to checkExistingConnectionsNeedDisconnecting is deleting existing connections, we want to
// kick off the creation of new connections.
@@ -1751,6 +1754,36 @@ func (wn *WebsocketNetwork) meshThread() {
}
}
+func (wn *WebsocketNetwork) refreshRelayArchivePhonebookAddresses() {
+ // TODO: only do DNS fetch every N seconds? Honor DNS TTL? Trust DNS library we're using to handle caching and TTL?
+ dnsBootstrapArray := wn.config.DNSBootstrapArray(wn.NetworkID)
+
+ for _, dnsBootstrap := range dnsBootstrapArray {
+ primaryRelayAddrs, primaryArchiveAddrs := wn.getDNSAddrs(dnsBootstrap.PrimarySRVBootstrap)
+
+ if dnsBootstrap.BackupSRVBootstrap != "" {
+ backupRelayAddrs, backupArchiveAddrs := wn.getDNSAddrs(dnsBootstrap.BackupSRVBootstrap)
+ dedupedRelayAddresses := wn.mergePrimarySecondaryRelayAddressSlices(wn.NetworkID, primaryRelayAddrs,
+ backupRelayAddrs, dnsBootstrap.DedupExp)
+ wn.updatePhonebookAddresses(dedupedRelayAddresses, append(primaryArchiveAddrs, backupArchiveAddrs...))
+ } else {
+ wn.updatePhonebookAddresses(primaryRelayAddrs, primaryArchiveAddrs)
+ }
+ }
+}
+
+func (wn *WebsocketNetwork) updatePhonebookAddresses(relayAddrs []string, archiveAddrs []string) {
+ if len(relayAddrs) > 0 {
+ wn.log.Debugf("got %d relay dns addrs, %#v", len(relayAddrs), relayAddrs[:imin(5, len(relayAddrs))])
+ wn.phonebook.ReplacePeerList(relayAddrs, string(wn.NetworkID), PhoneBookEntryRelayRole)
+ } else {
+ wn.log.Infof("got no relay DNS addrs for network %s", wn.NetworkID)
+ }
+ if len(archiveAddrs) > 0 {
+ wn.phonebook.ReplacePeerList(archiveAddrs, string(wn.NetworkID), PhoneBookEntryArchiverRole)
+ }
+}
+
// checkNewConnectionsNeeded checks to see if we need to have more connections to meet the GossipFanout target.
// if we do, it will spin async connection go routines.
// it returns false if no connections are needed, and true otherwise.
@@ -1965,9 +1998,48 @@ func (wn *WebsocketNetwork) prioWeightRefresh() {
}
}
+// This logic assumes that the relay address suffixes
+// correspond to the primary/backup network conventions. If this proves to be false, i.e. one network's
+// suffix is a substring of another network's suffix, then duplicates can end up in the merged slice.
+func (wn *WebsocketNetwork) mergePrimarySecondaryRelayAddressSlices(network protocol.NetworkID,
+ primaryRelayAddresses []string, secondaryRelayAddresses []string, dedupExp *regexp.Regexp) (dedupedRelayAddresses []string) {
+
+ if dedupExp == nil {
+ // No expression provided, so just append the slices without deduping
+ return append(primaryRelayAddresses, secondaryRelayAddresses...)
+ }
+
+ var relayAddressPrefixToValue = make(map[string]string, 2*len(primaryRelayAddresses))
+
+ for _, pra := range primaryRelayAddresses {
+ var normalizedPra = strings.ToLower(pra)
+
+ var pfxKey = dedupExp.ReplaceAllString(normalizedPra, "")
+ if _, exists := relayAddressPrefixToValue[pfxKey]; !exists {
+ relayAddressPrefixToValue[pfxKey] = normalizedPra
+ }
+ }
+
+ for _, sra := range secondaryRelayAddresses {
+ var normalizedSra = strings.ToLower(sra)
+ var pfxKey = dedupExp.ReplaceAllString(normalizedSra, "")
+
+ if _, exists := relayAddressPrefixToValue[pfxKey]; !exists {
+ relayAddressPrefixToValue[pfxKey] = normalizedSra
+ }
+ }
+
+ dedupedRelayAddresses = make([]string, 0, len(relayAddressPrefixToValue))
+ for _, value := range relayAddressPrefixToValue {
+ dedupedRelayAddresses = append(dedupedRelayAddresses, value)
+ }
+
+ return
+}
+
func (wn *WebsocketNetwork) getDNSAddrs(dnsBootstrap string) (relaysAddresses []string, archiverAddresses []string) {
var err error
- relaysAddresses, err = tools_network.ReadFromSRV("algobootstrap", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced())
+ relaysAddresses, err = wn.resolveSRVRecords("algobootstrap", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced())
if err != nil {
// only log this warning on testnet or devnet
if wn.NetworkID == config.Devnet || wn.NetworkID == config.Testnet {
@@ -1976,7 +2048,7 @@ func (wn *WebsocketNetwork) getDNSAddrs(dnsBootstrap string) (relaysAddresses []
relaysAddresses = nil
}
if wn.config.EnableCatchupFromArchiveServers || wn.config.EnableBlockServiceFallbackToArchiver {
- archiverAddresses, err = tools_network.ReadFromSRV("archive", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced())
+ archiverAddresses, err = wn.resolveSRVRecords("archive", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced())
if err != nil {
// only log this warning on testnet or devnet
if wn.NetworkID == config.Devnet || wn.NetworkID == config.Testnet {
@@ -2244,13 +2316,13 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
// if we abort before making a wsPeer this cleanup logic will close the connection
closeEarly := func(msg string) {
deadline := time.Now().Add(peerDisconnectionAckDuration)
- err := conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseProtocolError, msg), deadline)
- if err != nil {
- wn.log.Infof("tryConnect: failed to write CloseMessage to connection for %s", conn.RemoteAddr().String())
+ err2 := conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseProtocolError, msg), deadline)
+ if err2 != nil {
+ wn.log.Infof("tryConnect: failed to write CloseMessage to connection for %s: %v", conn.RemoteAddr().String(), err2)
}
- err = conn.CloseWithoutFlush()
- if err != nil {
- wn.log.Infof("tryConnect: failed to CloseWithoutFlush to connection for %s", conn.RemoteAddr().String())
+ err2 = conn.CloseWithoutFlush()
+ if err2 != nil {
+ wn.log.Infof("tryConnect: failed to CloseWithoutFlush to connection for %s: %v", conn.RemoteAddr().String(), err2)
}
}
@@ -2377,14 +2449,15 @@ func (wn *WebsocketNetwork) SetPeerData(peer Peer, key string, value interface{}
func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, nodeInfo NodeInfo) (wn *WebsocketNetwork, err error) {
phonebook := MakePhonebook(config.ConnectionsRateLimitingCount,
time.Duration(config.ConnectionsRateLimitingWindowSeconds)*time.Second)
- phonebook.ReplacePeerList(phonebookAddresses, config.DNSBootstrapID, PhoneBookEntryRelayRole)
+ phonebook.ReplacePeerList(phonebookAddresses, string(networkID), PhoneBookEntryRelayRole)
wn = &WebsocketNetwork{
- log: log,
- config: config,
- phonebook: phonebook,
- GenesisID: genesisID,
- NetworkID: networkID,
- nodeInfo: nodeInfo,
+ log: log,
+ config: config,
+ phonebook: phonebook,
+ GenesisID: genesisID,
+ NetworkID: networkID,
+ nodeInfo: nodeInfo,
+ resolveSRVRecords: tools_network.ReadFromSRV,
}
wn.setup()
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index 3229e2ae2..05992ec8d 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -30,6 +30,7 @@ import (
"net/http/httptest"
"net/url"
"os"
+ "regexp"
"runtime"
"sort"
"strings"
@@ -38,6 +39,9 @@ import (
"testing"
"time"
+ "github.com/algorand/go-algorand/internal/rapidgen"
+ "pgregory.net/rapid"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -58,11 +62,6 @@ const sendBufferLength = 1000
const genesisID = "go-test-network-genesis"
-func init() {
- // this allows test code to use out-of-protocol message tags and have them go through
- allowCustomTags = true
-}
-
func TestMain(m *testing.M) {
logging.Base().SetLevel(logging.Debug)
os.Exit(m.Run())
@@ -126,7 +125,7 @@ func init() {
func makeTestWebsocketNodeWithConfig(t testing.TB, conf config.Local, opts ...testWebsocketOption) *WebsocketNetwork {
log := logging.TestingLog(t)
- log.SetLevel(logging.Level(conf.BaseLoggerDebugLevel))
+ log.SetLevel(logging.Warn)
wn := &WebsocketNetwork{
log: log,
config: conf,
@@ -299,10 +298,16 @@ func netStop(t testing.TB, wn *WebsocketNetwork, name string) {
}
func setupWebsocketNetworkAB(t *testing.T, countTarget int) (*WebsocketNetwork, *WebsocketNetwork, *messageCounterHandler, func()) {
+ return setupWebsocketNetworkABwithLogger(t, countTarget, nil)
+}
+func setupWebsocketNetworkABwithLogger(t *testing.T, countTarget int, log logging.Logger) (*WebsocketNetwork, *WebsocketNetwork, *messageCounterHandler, func()) {
success := false
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
+ if log != nil {
+ netA.log = log
+ }
netA.Start()
defer func() {
if !success {
@@ -310,6 +315,9 @@ func setupWebsocketNetworkAB(t *testing.T, countTarget int) (*WebsocketNetwork,
}
}()
netB := makeTestWebsocketNode(t)
+ if log != nil {
+ netB.log = log
+ }
netB.config.GossipFanout = 1
addrA, postListen := netA.Address()
require.True(t, postListen)
@@ -355,38 +363,55 @@ func TestWebsocketNetworkBasic(t *testing.T) {
}
}
-// Set up two nodes, test that B drops invalid tags when A ends them.
-func TestWebsocketNetworkBasicInvalidTags(t *testing.T) { // nolint:paralleltest // changes global variable allowCustomTags
+type mutexBuilder struct {
+ logOutput strings.Builder
+ mu deadlock.Mutex
+}
+
+func (lw *mutexBuilder) Write(p []byte) (n int, err error) {
+ lw.mu.Lock()
+ defer lw.mu.Unlock()
+ return lw.logOutput.Write(p)
+}
+func (lw *mutexBuilder) String() string {
+ lw.mu.Lock()
+ defer lw.mu.Unlock()
+ return lw.logOutput.String()
+}
+
+// Set up two nodes, test that the connection between A and B is not established.
+func TestWebsocketNetworkBasicInvalidTags(t *testing.T) { // nolint:paralleltest // changes global variable defaultSendMessageTags
partitiontest.PartitionTest(t)
- // disallow custom tags for this test
- allowCustomTags = false
- defaultSendMessageTags["XX"] = true
+ defaultSendMessageTagsOriginal := defaultSendMessageTags
+ defaultSendMessageTags = map[protocol.Tag]bool{"XX": true, "MI": true}
defer func() {
- allowCustomTags = true
- delete(defaultSendMessageTags, "XX")
+ defaultSendMessageTags = defaultSendMessageTagsOriginal
}()
+ var logOutput mutexBuilder
+ log := logging.TestingLog(t)
+ log.SetOutput(&logOutput)
+ log.SetLevel(logging.Level(logging.Debug))
+ netA, netB, counter, closeFunc := setupWebsocketNetworkABwithLogger(t, 0, log)
- netA, netB, counter, closeFunc := setupWebsocketNetworkAB(t, 2)
defer closeFunc()
- counterDone := counter.done
- // register a handler that should never get called, because the message will
- // be dropped before it gets to the handlers if allowCustomTags = false
+ // register a handler that should never get called, because the message will never be delivered
netB.RegisterHandlers([]TaggedMessageHandler{
{Tag: "XX", MessageHandler: HandlerFunc(func(msg IncomingMessage) OutgoingMessage {
require.Fail(t, "MessageHandler for out-of-protocol tag should not be called")
return OutgoingMessage{}
})}})
- // send 2 valid and 2 invalid tags
- netA.Broadcast(context.Background(), "TX", []byte("foo"), false, nil)
+ // send a message with an invalid tag which is in defaultSendMessageTags.
+ // it should not go through because the defaultSendMessageTags should not be accepted
+ // and the connection should be dropped dropped
netA.Broadcast(context.Background(), "XX", []byte("foo"), false, nil)
- netA.Broadcast(context.Background(), "TX", []byte("bar"), false, nil)
- netA.Broadcast(context.Background(), "XX", []byte("bar"), false, nil)
-
- select {
- case <-counterDone:
- case <-time.After(2 * time.Second):
- t.Errorf("timeout, count=%d, wanted 2", counter.count)
+ for p := 0; p < 100; p++ {
+ if strings.Contains(logOutput.String(), "wsPeer handleMessageOfInterest: could not unmarshall message from") {
+ break
+ }
+ time.Sleep(20 * time.Millisecond)
}
+ require.Contains(t, logOutput.String(), "wsPeer handleMessageOfInterest: could not unmarshall message from")
+ require.Equal(t, 0, counter.count)
}
// Set up two nodes, send proposal
@@ -581,7 +606,7 @@ func TestWebsocketNetworkCancel(t *testing.T) {
msgs[50].ctx = ctx
for _, peer := range peers {
- peer.sendBufferHighPrio <- sendMessages{msgs}
+ peer.sendBufferHighPrio <- sendMessages{msgs: msgs}
}
select {
@@ -1033,8 +1058,16 @@ func TestDupFilter(t *testing.T) {
netC.Start()
defer netC.Stop()
- msg := make([]byte, messageFilterSize+1)
- rand.Read(msg)
+ makeMsg := func(n int) []byte {
+ // We cannot harcode the msgSize to messageFilterSize + 1 because max allowed AV message is smaller than that.
+ // We also cannot use maxSize for PP since it's a compressible tag but trying to compress random data will expand it.
+ if messageFilterSize+1 < n {
+ n = messageFilterSize + 1
+ }
+ msg := make([]byte, n)
+ rand.Read(msg)
+ return msg
+ }
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -1050,9 +1083,10 @@ func TestDupFilter(t *testing.T) {
// Maybe we should just .Set(0) those counters and use them in this test?
// This exercise inbound dup detection.
- netA.Broadcast(context.Background(), protocol.AgreementVoteTag, msg, true, nil)
- netA.Broadcast(context.Background(), protocol.AgreementVoteTag, msg, true, nil)
- netA.Broadcast(context.Background(), protocol.AgreementVoteTag, msg, true, nil)
+ avMsg := makeMsg(int(protocol.AgreementVoteTag.MaxMessageSize()))
+ netA.Broadcast(context.Background(), protocol.AgreementVoteTag, avMsg, true, nil)
+ netA.Broadcast(context.Background(), protocol.AgreementVoteTag, avMsg, true, nil)
+ netA.Broadcast(context.Background(), protocol.AgreementVoteTag, avMsg, true, nil)
t.Log("A dup send done")
select {
@@ -1065,15 +1099,16 @@ func TestDupFilter(t *testing.T) {
counter.lock.Unlock()
// new message
- rand.Read(msg)
+ debugTag2Msg := makeMsg(int(debugTag2.MaxMessageSize()))
+ t.Logf("debugTag2Msg len %d", len(debugTag2Msg))
t.Log("A send, C non-dup-send")
- netA.Broadcast(context.Background(), debugTag2, msg, true, nil)
+ netA.Broadcast(context.Background(), debugTag2, debugTag2Msg, true, nil)
// B should broadcast its non-desire to receive the message again
time.Sleep(500 * time.Millisecond)
// C should now not send these
- netC.Broadcast(context.Background(), debugTag2, msg, true, nil)
- netC.Broadcast(context.Background(), debugTag2, msg, true, nil)
+ netC.Broadcast(context.Background(), debugTag2, debugTag2Msg, true, nil)
+ netC.Broadcast(context.Background(), debugTag2, debugTag2Msg, true, nil)
select {
case <-counter2.done:
@@ -1133,6 +1168,16 @@ func TestGetPeers(t *testing.T) {
expectAddrs := []string{addrA, "a", "b", "c"}
sort.Strings(expectAddrs)
assert.Equal(t, expectAddrs, peerAddrs)
+
+ // For now, PeersPhonebookArchivalNodes and PeersPhonebookRelays will return the same set of nodes
+ bPeers2 := netB.GetPeers(PeersPhonebookArchivalNodes)
+ peerAddrs2 := make([]string, len(bPeers2))
+ for pi2, peer2 := range bPeers2 {
+ peerAddrs2[pi2] = peer2.(HTTPPeer).GetAddress()
+ }
+ sort.Strings(peerAddrs2)
+ assert.Equal(t, expectAddrs, peerAddrs2)
+
}
// confirms that if the config PublicAddress is set to "testing",
@@ -2756,15 +2801,6 @@ func TestWebsocketNetworkTopicRoundtrip(t *testing.T) {
assert.Equal(t, 5, int(sum[0]))
}
-var (
- ft1 = protocol.Tag("F1")
- ft2 = protocol.Tag("F2")
- ft3 = protocol.Tag("F3")
- ft4 = protocol.Tag("F4")
-
- testTags = []protocol.Tag{ft1, ft2, ft3, ft4}
-)
-
func waitPeerInternalChanQuiet(t *testing.T, netA *WebsocketNetwork) {
// okay, but now we need to wait for asynchronous thread within netA to _apply_ the MOI to its peer for netB...
timeout := time.Now().Add(100 * time.Millisecond)
@@ -2803,7 +2839,14 @@ func waitForMOIRefreshQuiet(netB *WebsocketNetwork) {
// Set up two nodes, have one of them request a certain message tag mask, and verify the other follow that.
func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
partitiontest.PartitionTest(t)
-
+ var (
+ ft1 = protocol.Tag("AV")
+ ft2 = protocol.Tag("pj")
+ ft3 = protocol.Tag("NI")
+ ft4 = protocol.Tag("TX")
+
+ testTags = []protocol.Tag{ft1, ft2, ft3, ft4}
+ )
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
netA.config.EnablePingHandler = false
@@ -2817,6 +2860,12 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
require.True(t, postListen)
t.Log(addrA)
netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole)
+
+ // have netB asking netA to send it ft2, deregister ping handler to make sure that we aren't exceeding the maximum MOI messagesize
+ // Max MOI size is calculated by encoding all of the valid tags, since we are using a custom tag here we must deregister one in the default set.
+ netB.DeregisterMessageInterest(protocol.PingTag)
+ netB.RegisterMessageInterest(ft2)
+
netB.Start()
defer netStop(t, netB, "B")
@@ -2868,6 +2917,10 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
// have netB asking netA to send it only AgreementVoteTag and ProposalPayloadTag
netB.RegisterMessageInterest(ft2)
+ netB.DeregisterMessageInterest(ft1)
+ netB.DeregisterMessageInterest(ft3)
+ netB.DeregisterMessageInterest(ft4)
+
// send another message which we can track, so that we'll know that the first message was delivered.
netB.Broadcast(context.Background(), protocol.VoteBundleTag, []byte{0, 1, 2, 3, 4}, true, nil)
messageFilterArriveWg.Wait()
@@ -3141,11 +3194,11 @@ func TestWebsocketNetworkTXMessageOfInterestNPN(t *testing.T) {
netB.OnNetworkAdvance()
waitForMOIRefreshQuiet(netB)
- for i := 0; i < 10; i++ {
+ for i := 0; i < 100; i++ {
if atomic.LoadUint32(&netB.wantTXGossip) == uint32(wantTXGossipNo) {
break
}
- time.Sleep(time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
}
require.Equal(t, uint32(wantTXGossipNo), atomic.LoadUint32(&netB.wantTXGossip))
// send another message which we can track, so that we'll know that the first message was delivered.
@@ -3246,11 +3299,11 @@ func TestWebsocketNetworkTXMessageOfInterestPN(t *testing.T) {
netB.OnNetworkAdvance()
waitForMOIRefreshQuiet(netB)
- for i := 0; i < 10; i++ {
+ for i := 0; i < 100; i++ {
if atomic.LoadUint32(&netB.wantTXGossip) == uint32(wantTXGossipYes) {
break
}
- time.Sleep(time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
}
require.Equal(t, uint32(wantTXGossipYes), atomic.LoadUint32(&netB.wantTXGossip))
// send another message which we can track, so that we'll know that the first message was delivered.
@@ -3864,8 +3917,7 @@ func TestMaxHeaderSize(t *testing.T) {
netA.wsMaxHeaderBytes = wsMaxHeaderBytes
netA.wg.Add(1)
netA.tryConnect(addrB, gossipB)
- time.Sleep(250 * time.Millisecond)
- assert.Equal(t, 1, len(netA.peers))
+ require.Eventually(t, func() bool { return netA.NumPeers() == 1 }, 500*time.Millisecond, 25*time.Millisecond)
netA.removePeer(netA.peers[0], disconnectReasonNone)
assert.Zero(t, len(netA.peers))
@@ -3887,8 +3939,7 @@ func TestMaxHeaderSize(t *testing.T) {
netA.wsMaxHeaderBytes = 0
netA.wg.Add(1)
netA.tryConnect(addrB, gossipB)
- time.Sleep(250 * time.Millisecond)
- assert.Equal(t, 1, len(netA.peers))
+ require.Eventually(t, func() bool { return netA.NumPeers() == 1 }, 500*time.Millisecond, 25*time.Millisecond)
}
func TestTryConnectEarlyWrite(t *testing.T) {
@@ -3945,3 +3996,601 @@ func TestTryConnectEarlyWrite(t *testing.T) {
fmt.Printf("MI Message Count: %v\n", netA.peers[0].miMessageCount)
assert.Equal(t, uint64(1), netA.peers[0].miMessageCount)
}
+
+// Test functionality that allows a node to discard a block response that it did not request or that arrived too late.
+// Both cases are tested here by having A send unexpected, late responses to nodes B and C respectively.
+func TestDiscardUnrequestedBlockResponse(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
+ netA.config.GossipFanout = 1
+
+ netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
+ netB.config.GossipFanout = 1
+
+ netC := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netC"})
+ netC.config.GossipFanout = 1
+
+ netA.Start()
+ defer netA.Stop()
+ netB.Start()
+ defer netB.Stop()
+
+ addrB, ok := netB.Address()
+ require.True(t, ok)
+ gossipB, err := netB.addrToGossipAddr(addrB)
+ require.NoError(t, err)
+
+ netA.wg.Add(1)
+ netA.tryConnect(addrB, gossipB)
+ require.Eventually(t, func() bool { return netA.NumPeers() == 1 }, 500*time.Millisecond, 25*time.Millisecond)
+
+ // send an unrequested block response
+ msg := make([]sendMessage, 1, 1)
+ msg[0] = sendMessage{
+ data: append([]byte(protocol.TopicMsgRespTag), []byte("foo")...),
+ enqueued: time.Now(),
+ peerEnqueued: time.Now(),
+ ctx: context.Background(),
+ }
+ netA.peers[0].sendBufferBulk <- sendMessages{msgs: msg}
+ require.Eventually(t,
+ func() bool {
+ return networkConnectionsDroppedTotal.GetUint64ValueForLabels(map[string]string{"reason": "unrequestedTS"}) == 1
+ },
+ 1*time.Second,
+ 50*time.Millisecond,
+ )
+
+ // Stop and confirm that we hit the case of disconnecting a peer for sending an unrequested block response
+ require.Zero(t, netB.NumPeers())
+
+ netC.Start()
+ defer netC.Stop()
+
+ addrC, ok := netC.Address()
+ require.True(t, ok)
+ gossipC, err := netC.addrToGossipAddr(addrC)
+ require.NoError(t, err)
+
+ netA.wg.Add(1)
+ netA.tryConnect(addrC, gossipC)
+ require.Eventually(t, func() bool { return netA.NumPeers() == 1 }, 500*time.Millisecond, 25*time.Millisecond)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ topics := Topics{
+ MakeTopic("requestDataType",
+ []byte("a")),
+ MakeTopic(
+ "blockData",
+ []byte("b")),
+ }
+ // Send a request for a block and cancel it after the handler has been registered
+ go func() {
+ netC.peers[0].Request(ctx, protocol.UniEnsBlockReqTag, topics)
+ }()
+ require.Eventually(
+ t,
+ func() bool {
+ netC.peersLock.RLock()
+ defer netC.peersLock.RUnlock()
+ require.NotEmpty(t, netC.peers)
+ return netC.peers[0].lenResponseChannels() > 0
+ },
+ 1*time.Second,
+ 50*time.Millisecond,
+ )
+ cancel()
+
+ // confirm that the request was cancelled but that we have registered that we have sent a request
+ require.Eventually(
+ t,
+ func() bool { return netC.peers[0].lenResponseChannels() == 0 },
+ 500*time.Millisecond,
+ 20*time.Millisecond,
+ )
+ require.Equal(t, atomic.LoadInt64(&netC.peers[0].outstandingTopicRequests), int64(1))
+
+ // Create a buffer to monitor log output from netC
+ logBuffer := bytes.NewBuffer(nil)
+ netC.log.SetOutput(logBuffer)
+
+ // send a late TS response from A -> C
+ netA.peers[0].sendBufferBulk <- sendMessages{msgs: msg}
+ require.Eventually(
+ t,
+ func() bool { return atomic.LoadInt64(&netC.peers[0].outstandingTopicRequests) == int64(0) },
+ 500*time.Millisecond,
+ 20*time.Millisecond,
+ )
+
+ // Stop and confirm that we hit the case of disconnecting a peer for sending a stale block response
+ netC.Stop()
+ lg := logBuffer.String()
+ require.Contains(t, lg, "wsPeer readLoop: received a TS response for a stale request ")
+}
+
+func customNetworkIDGen(networkID protocol.NetworkID) *rapid.Generator[protocol.NetworkID] {
+ return rapid.Custom(func(t *rapid.T) protocol.NetworkID {
+ // Unused/satisfying rapid requirement
+ rapid.String().Draw(t, "networkIDGen")
+ return networkID
+ })
+}
+
+// The hardcoded network IDs just make testing this function more difficult with no confidence gain (the custom logic
+// is already exercised well in the dnsbootstrap parsing tests).
+func nonHardcodedNetworkIDGen() *rapid.Generator[protocol.NetworkID] {
+ return rapid.OneOf(customNetworkIDGen(config.Testnet), customNetworkIDGen(config.Mainnet),
+ customNetworkIDGen(config.Devtestnet))
+}
+
+/*
+Basic exercise of the refreshRelayArchivePhonebookAddresses function, uses base / expected cases, relying on neighboring
+unit tests to cover the merge and phonebook update logic.
+*/
+func TestRefreshRelayArchivePhonebookAddresses(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var netA *WebsocketNetwork
+ var refreshRelayDNSBootstrapID = "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net)"
+
+ rapid.Check(t, func(t1 *rapid.T) {
+ refreshTestConf := defaultConfig
+ refreshTestConf.DNSBootstrapID = refreshRelayDNSBootstrapID
+ netA = makeTestWebsocketNodeWithConfig(t, refreshTestConf)
+ netA.NetworkID = nonHardcodedNetworkIDGen().Draw(t1, "network")
+
+ primarySRVBootstrap := strings.Replace("<network>.algorand.network", "<network>", string(netA.NetworkID), -1)
+ backupSRVBootstrap := strings.Replace("<network>.algorand.net", "<network>", string(netA.NetworkID), -1)
+ var primaryRelayResolvedRecords []string
+ var secondaryRelayResolvedRecords []string
+ var primaryArchiveResolvedRecords []string
+ var secondaryArchiveResolvedRecords []string
+
+ for _, record := range []string{"r1.algorand-<network>.network",
+ "r2.algorand-<network>.network", "r3.algorand-<network>.network"} {
+ var recordSub = strings.Replace(record, "<network>", string(netA.NetworkID), -1)
+ primaryRelayResolvedRecords = append(primaryRelayResolvedRecords, recordSub)
+ secondaryRelayResolvedRecords = append(secondaryRelayResolvedRecords, strings.Replace(recordSub, "network", "net", -1))
+ }
+
+ for _, record := range []string{"r1archive.algorand-<network>.network",
+ "r2archive.algorand-<network>.network", "r3archive.algorand-<network>.network"} {
+ var recordSub = strings.Replace(record, "<network>", string(netA.NetworkID), -1)
+ primaryArchiveResolvedRecords = append(primaryArchiveResolvedRecords, recordSub)
+ secondaryArchiveResolvedRecords = append(secondaryArchiveResolvedRecords, strings.Replace(recordSub, "network", "net", -1))
+ }
+
+ // Mock the SRV record lookup
+ netA.resolveSRVRecords = func(service string, protocol string, name string, fallbackDNSResolverAddress string,
+ secure bool) (addrs []string, err error) {
+ if service == "algobootstrap" && protocol == "tcp" && name == primarySRVBootstrap {
+ return primaryRelayResolvedRecords, nil
+ } else if service == "algobootstrap" && protocol == "tcp" && name == backupSRVBootstrap {
+ return secondaryRelayResolvedRecords, nil
+ }
+
+ if service == "archive" && protocol == "tcp" && name == primarySRVBootstrap {
+ return primaryArchiveResolvedRecords, nil
+ } else if service == "archive" && protocol == "tcp" && name == backupSRVBootstrap {
+ return secondaryArchiveResolvedRecords, nil
+ }
+
+ return
+ }
+
+ relayPeers := netA.GetPeers(PeersPhonebookRelays)
+ assert.Equal(t, 0, len(relayPeers))
+
+ archivePeers := netA.GetPeers(PeersPhonebookArchivers)
+ assert.Equal(t, 0, len(archivePeers))
+
+ netA.refreshRelayArchivePhonebookAddresses()
+
+ relayPeers = netA.GetPeers(PeersPhonebookRelays)
+
+ assert.Equal(t, 3, len(relayPeers))
+ relayAddrs := make([]string, 0, len(relayPeers))
+ for _, peer := range relayPeers {
+ relayAddrs = append(relayAddrs, peer.(HTTPPeer).GetAddress())
+ }
+
+ assert.ElementsMatch(t, primaryRelayResolvedRecords, relayAddrs)
+
+ archivePeers = netA.GetPeers(PeersPhonebookArchivers)
+
+ // For the time being, we do not dedup resolved archive nodes
+ assert.Equal(t, 6, len(archivePeers))
+
+ archiveAddrs := make([]string, 0, len(archivePeers))
+ for _, peer := range archivePeers {
+ archiveAddrs = append(archiveAddrs, peer.(HTTPPeer).GetAddress())
+ }
+
+ assert.ElementsMatch(t, append(primaryArchiveResolvedRecords, secondaryArchiveResolvedRecords...), archiveAddrs)
+
+ })
+}
+
+/*
+Exercises the updatePhonebookAddresses function, notably with different variations of valid relay and
+archival addresses.
+*/
+func TestUpdatePhonebookAddresses(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var netA *WebsocketNetwork
+
+ rapid.Check(t, func(t1 *rapid.T) {
+ netA = makeTestWebsocketNode(t)
+ relayPeers := netA.GetPeers(PeersPhonebookRelays)
+ assert.Equal(t, 0, len(relayPeers))
+
+ archivePeers := netA.GetPeers(PeersPhonebookArchivers)
+ assert.Equal(t, 0, len(archivePeers))
+
+ domainGen := rapidgen.Domain()
+
+ // Generate between 0 and N examples - if no dups, should end up in phonebook
+ relayDomainsGen := rapid.SliceOfN(domainGen, 0, 200)
+
+ relayDomains := relayDomainsGen.Draw(t1, "relayDomains")
+
+ // Dont overlap with relays, duplicates between them not stored in phonebook as of this writing
+ archiveDomainsGen := rapid.SliceOfN(rapidgen.DomainOf(253, 63, "", relayDomains), 0, 200)
+ archiveDomains := archiveDomainsGen.Draw(t1, "archiveDomains")
+ netA.updatePhonebookAddresses(relayDomains, archiveDomains)
+
+ // Check that entries are in fact in phonebook less any duplicates
+ dedupedRelayDomains := removeDuplicateStr(relayDomains, false)
+ dedupedArchiveDomains := removeDuplicateStr(archiveDomains, false)
+
+ relayPeers = netA.GetPeers(PeersPhonebookRelays)
+ assert.Equal(t, len(dedupedRelayDomains), len(relayPeers))
+
+ relayAddrs := make([]string, 0, len(relayPeers))
+ for _, peer := range relayPeers {
+ relayAddrs = append(relayAddrs, peer.(HTTPPeer).GetAddress())
+ }
+
+ assert.ElementsMatch(t, dedupedRelayDomains, relayAddrs)
+
+ archivePeers = netA.GetPeers(PeersPhonebookArchivers)
+ assert.Equal(t, len(dedupedArchiveDomains), len(archivePeers))
+
+ archiveAddrs := make([]string, 0, len(archivePeers))
+ for _, peer := range archivePeers {
+ archiveAddrs = append(archiveAddrs, peer.(HTTPPeer).GetAddress())
+ }
+
+ assert.ElementsMatch(t, dedupedArchiveDomains, archiveAddrs)
+
+ // Generate fresh set of addresses with a duplicate from original batch if warranted,
+ // assert phonebook reflects fresh list / prior peers other than selected duplicate
+ // are not present
+ var priorRelayDomains = relayDomains
+
+ // Dont overlap with archive nodes previously specified, duplicates between them not stored in phonebook as of this writing
+ relayDomainsGen = rapid.SliceOfN(rapidgen.DomainOf(253, 63, "", archiveDomains), 0, 200)
+ relayDomains = relayDomainsGen.Draw(t1, "relayDomains")
+
+ // Randomly select a prior relay domain
+ if len(priorRelayDomains) > 0 {
+ priorIdx := rapid.IntRange(0, len(priorRelayDomains)-1).Draw(t1, "")
+ relayDomains = append(relayDomains, priorRelayDomains[priorIdx])
+ }
+
+ netA.updatePhonebookAddresses(relayDomains, nil)
+
+ // Check that entries are in fact in phonebook less any duplicates
+ dedupedRelayDomains = removeDuplicateStr(relayDomains, false)
+
+ relayPeers = netA.GetPeers(PeersPhonebookRelays)
+ assert.Equal(t, len(dedupedRelayDomains), len(relayPeers))
+
+ relayAddrs = nil
+ for _, peer := range relayPeers {
+ relayAddrs = append(relayAddrs, peer.(HTTPPeer).GetAddress())
+ }
+
+ assert.ElementsMatch(t, dedupedRelayDomains, relayAddrs)
+
+ archivePeers = netA.GetPeers(PeersPhonebookArchivers)
+ assert.Equal(t, len(dedupedArchiveDomains), len(archivePeers))
+
+ archiveAddrs = nil
+ for _, peer := range archivePeers {
+ archiveAddrs = append(archiveAddrs, peer.(HTTPPeer).GetAddress())
+ }
+
+ assert.ElementsMatch(t, dedupedArchiveDomains, archiveAddrs)
+ })
+}
+
+func removeDuplicateStr(strSlice []string, lowerCase bool) []string {
+ allKeys := make(map[string]bool)
+ var dedupStrSlice = make([]string, 0)
+ for _, item := range strSlice {
+ if lowerCase {
+ item = strings.ToLower(item)
+ }
+ if _, exists := allKeys[item]; !exists {
+ allKeys[item] = true
+ dedupStrSlice = append(dedupStrSlice, item)
+ }
+ }
+ return dedupStrSlice
+}
+
+func replaceAllIn(strSlice []string, strToReplace string, newStr string) []string {
+ var subbedStrSlice = make([]string, 0)
+ for _, item := range strSlice {
+ item = strings.ReplaceAll(item, strToReplace, newStr)
+ subbedStrSlice = append(subbedStrSlice, item)
+ }
+
+ return subbedStrSlice
+}
+
+func supportedNetworkGen() *rapid.Generator[string] {
+ return rapid.OneOf(rapid.StringMatching(string(config.Testnet)), rapid.StringMatching(string(config.Mainnet)),
+ rapid.StringMatching(string(config.Devnet)), rapid.StringMatching(string(config.Betanet)),
+ rapid.StringMatching(string(config.Alphanet)), rapid.StringMatching(string(config.Devtestnet)))
+}
+
+func TestMergePrimarySecondaryRelayAddressListsMinOverlap(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var netA *WebsocketNetwork
+
+ rapid.Check(t, func(t1 *rapid.T) {
+ netA = makeTestWebsocketNode(t)
+
+ network := supportedNetworkGen().Draw(t1, "network")
+ dedupExp := regexp.MustCompile(strings.Replace(
+ `(algorand-<network>.(network|net))`, "<network>", network, -1))
+ domainPortGen := rapidgen.DomainWithPort()
+
+ // Generate between 0 and N examples - if no dups, should end up in phonebook
+ domainsGen := rapid.SliceOfN(domainPortGen, 0, 200)
+
+ primaryRelayAddresses := domainsGen.Draw(t1, "primaryRelayAddresses")
+ secondaryRelayAddresses := domainsGen.Draw(t1, "secondaryRelayAddresses")
+
+ mergedRelayAddresses := netA.mergePrimarySecondaryRelayAddressSlices(protocol.NetworkID(network),
+ primaryRelayAddresses, secondaryRelayAddresses, dedupExp)
+
+ expectedRelayAddresses := removeDuplicateStr(append(primaryRelayAddresses, secondaryRelayAddresses...), true)
+
+ assert.ElementsMatch(t, expectedRelayAddresses, mergedRelayAddresses)
+ })
+}
+
+type MergeTestDNSInputs struct {
+ dedupExpStr string
+
+ primaryDomainSuffix string
+
+ secondaryDomainSuffix string
+}
+
+func mergePrimarySecondaryRelayAddressListsPartialOverlapTestInputsGen() *rapid.Generator[*MergeTestDNSInputs] {
+
+ algorand0Base := rapid.Custom(func(t *rapid.T) *MergeTestDNSInputs {
+ //unused/satisfying rapid expectation
+ rapid.String().Draw(t, "algorand0Base")
+ // <network>.algorand.network?backup=<network>.algorand0.network&
+ // dedup=<name>.(algorand-<network>|n-<network>.algorand0).network
+ return &MergeTestDNSInputs{
+ dedupExpStr: "((algorand-<network>|n-<network>.algorand0).network)",
+ primaryDomainSuffix: "algorand-<network>.network",
+ secondaryDomainSuffix: "n-<network>.algorand0.network",
+ }
+ })
+
+ algorand0Inverse := rapid.Custom(func(t *rapid.T) *MergeTestDNSInputs {
+ //unused/satisfying rapid expectation
+ rapid.String().Draw(t, "algorand0Inverse")
+ // <network>.algorand0.network?backup=<network>.algorand.network&
+ // dedup=<name>.(algorand-<network>|n-<network>.algorand0).network
+ return &MergeTestDNSInputs{
+ dedupExpStr: "((algorand-<network>|n-<network>.algorand0).network)",
+ primaryDomainSuffix: "n-<network>.algorand0.network",
+ secondaryDomainSuffix: "algorand-<network>.network",
+ }
+ })
+
+ algorandNetBase := rapid.Custom(func(t *rapid.T) *MergeTestDNSInputs {
+ //unused/satisfying rapid expectation
+ rapid.String().Draw(t, "algorandNetBase")
+ //<network>.algorand.network?backup=<network>.algorand.net
+ // dedup=<name>.algorand-<network>.(network|net)
+ return &MergeTestDNSInputs{
+ dedupExpStr: "(algorand-<network>.(network|net))",
+ primaryDomainSuffix: "algorand-<network>.network",
+ secondaryDomainSuffix: "algorand-<network>.net",
+ }
+ })
+
+ algorandNetInverse := rapid.Custom(func(t *rapid.T) *MergeTestDNSInputs {
+ //unused/satisfying rapid expectation
+ rapid.String().Draw(t, "algorandNetInverse")
+ //<network>.algorand.net?backup=<network>.algorand.network" +
+ // "&dedup=<name>.algorand-<network>.(network|net)
+ return &MergeTestDNSInputs{
+ dedupExpStr: "(algorand-<network>.(network|net))",
+ primaryDomainSuffix: "algorand-<network>.net",
+ secondaryDomainSuffix: "algorand-<network>.network",
+ }
+ })
+
+ return rapid.OneOf(algorand0Base, algorand0Inverse, algorandNetBase, algorandNetInverse)
+}
+
+func TestMergePrimarySecondaryRelayAddressListsPartialOverlap(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var netA *WebsocketNetwork
+
+ rapid.Check(t, func(t1 *rapid.T) {
+ netA = makeTestWebsocketNode(t)
+
+ network := supportedNetworkGen().Draw(t1, "network")
+ mergeTestInputs := mergePrimarySecondaryRelayAddressListsPartialOverlapTestInputsGen().Draw(t1, "mergeTestInputs")
+
+ dedupExp := regexp.MustCompile(strings.Replace(
+ mergeTestInputs.dedupExpStr, "<network>", network, -1))
+ primaryDomainSuffix := strings.Replace(
+ mergeTestInputs.primaryDomainSuffix, "<network>", network, -1)
+
+ // Generate hosts for a primary network domain
+ primaryNetworkDomainGen := rapidgen.DomainWithSuffixAndPort(primaryDomainSuffix, nil)
+ primaryDomainsGen := rapid.SliceOfN(primaryNetworkDomainGen, 0, 200)
+
+ primaryRelayAddresses := primaryDomainsGen.Draw(t1, "primaryRelayAddresses")
+
+ secondaryDomainSuffix := strings.Replace(
+ mergeTestInputs.secondaryDomainSuffix, "<network>", network, -1)
+ // Generate these addresses from primary ones, find/replace domain suffix appropriately
+ secondaryRelayAddresses := replaceAllIn(primaryRelayAddresses, primaryDomainSuffix, secondaryDomainSuffix)
+ // Add some generated addresses to secondary list - to simplify verification further down
+ // (substituting suffixes, etc), we dont want the generated addresses to duplicate any of
+ // the replaced secondary ones
+ secondaryNetworkDomainGen := rapidgen.DomainWithSuffixAndPort(secondaryDomainSuffix, secondaryRelayAddresses)
+ secondaryDomainsGen := rapid.SliceOfN(secondaryNetworkDomainGen, 0, 200)
+ generatedSecondaryRelayAddresses := secondaryDomainsGen.Draw(t1, "secondaryRelayAddresses")
+ secondaryRelayAddresses = append(secondaryRelayAddresses, generatedSecondaryRelayAddresses...)
+
+ mergedRelayAddresses := netA.mergePrimarySecondaryRelayAddressSlices(protocol.NetworkID(network),
+ primaryRelayAddresses, secondaryRelayAddresses, dedupExp)
+
+ // We expect the primary addresses to take precedence over a "matching" secondary address, randomly generated
+ // secondary addresses should be present in the merged slice
+ expectedRelayAddresses := removeDuplicateStr(append(primaryRelayAddresses, generatedSecondaryRelayAddresses...), true)
+
+ assert.ElementsMatch(t, expectedRelayAddresses, mergedRelayAddresses)
+ })
+}
+
+// Case where a "backup" network is specified, but no dedup expression is provided. Technically possible,
+// but there is little benefit vs specifying them as separate `;` separated addresses in DNSBootrstrapID.
+func TestMergePrimarySecondaryRelayAddressListsNoDedupExp(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var netA *WebsocketNetwork
+
+ rapid.Check(t, func(t1 *rapid.T) {
+ netA = makeTestWebsocketNode(t)
+
+ network := supportedNetworkGen().Draw(t1, "network")
+ primaryDomainSuffix := strings.Replace(
+ `n-<network>.algorand0.network`, "<network>", network, -1)
+
+ // Generate hosts for a primary network domain
+ primaryNetworkDomainGen := rapidgen.DomainWithSuffixAndPort(primaryDomainSuffix, nil)
+ primaryDomainsGen := rapid.SliceOfN(primaryNetworkDomainGen, 0, 200)
+
+ primaryRelayAddresses := primaryDomainsGen.Draw(t1, "primaryRelayAddresses")
+
+ secondaryDomainSuffix := strings.Replace(
+ `algorand-<network>.network`, "<network>", network, -1)
+ // Generate these addresses from primary ones, find/replace domain suffix appropriately
+ secondaryRelayAddresses := replaceAllIn(primaryRelayAddresses, primaryDomainSuffix, secondaryDomainSuffix)
+ // Add some generated addresses to secondary list - to simplify verification further down
+ // (substituting suffixes, etc), we don't want the generated addresses to duplicate any of
+ // the replaced secondary ones
+ secondaryNetworkDomainGen := rapidgen.DomainWithSuffixAndPort(secondaryDomainSuffix, secondaryRelayAddresses)
+ secondaryDomainsGen := rapid.SliceOfN(secondaryNetworkDomainGen, 0, 200)
+ generatedSecondaryRelayAddresses := secondaryDomainsGen.Draw(t1, "secondaryRelayAddresses")
+ secondaryRelayAddresses = append(secondaryRelayAddresses, generatedSecondaryRelayAddresses...)
+
+ mergedRelayAddresses := netA.mergePrimarySecondaryRelayAddressSlices(protocol.NetworkID(network),
+ primaryRelayAddresses, secondaryRelayAddresses, nil)
+
+ // We expect non deduplication, so all addresses _should_ be present (note that no lower casing happens either)
+ expectedRelayAddresses := append(primaryRelayAddresses, secondaryRelayAddresses...)
+
+ assert.ElementsMatch(t, expectedRelayAddresses, mergedRelayAddresses)
+ })
+}
+
+// TestSendMessageCallbacks tests that the SendMessage callbacks are called correctly. These are currently used for
+// decrementing the number of bytes considered currently in flight for blockservice memcaps.
+func TestSendMessageCallbacks(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ netA, netB, _, closeFunc := setupWebsocketNetworkAB(t, 2)
+ defer closeFunc()
+
+ var counter uint64
+ require.NotZero(t, netA.NumPeers())
+
+ // peerB is netA's representation of netB and vice versa
+ peerB := netA.peers[0]
+ peerA := netB.peers[0]
+
+ // Need to create a channel so that TS messages sent by netA don't get filtered out in the readLoop
+ peerA.makeResponseChannel(1)
+
+ // The for loop simulates netA receiving 100 UE block requests from netB
+ // and goes through the actual response code path to generate and send TS responses to netB
+ for i := 0; i < 100; i++ {
+ randInt := crypto.RandUint64()%(128) + 1
+ atomic.AddUint64(&counter, randInt)
+ topic := MakeTopic("val", []byte("blah"))
+ callback := func() {
+ atomic.AddUint64(&counter, ^uint64(randInt-1))
+ }
+ msg := IncomingMessage{Sender: peerB, Tag: protocol.UniEnsBlockReqTag}
+ peerB.Respond(context.Background(), msg, OutgoingMessage{OnRelease: callback, Topics: Topics{topic}})
+ }
+ // Confirm that netB's representation netA peerB has received some requests and decremented the counter
+ // of outstanding TS requests below 0. This will be true because we never made any UE block requests, we only
+ // simulated them by manually creating a IncomingMessage with the UE tag in the loop above
+ require.Eventually(t,
+ func() bool { return atomic.LoadInt64(&peerA.outstandingTopicRequests) < 0 },
+ 500*time.Millisecond,
+ 25*time.Millisecond,
+ )
+
+ // confirm that the test counter decrements down to zero correctly through callbacks
+ require.Eventually(t,
+ func() bool { return atomic.LoadUint64(&counter) == uint64(0) },
+ 500*time.Millisecond,
+ 25*time.Millisecond,
+ )
+}
+
+func TestSendMessageCallbackDrain(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ node := makeTestWebsocketNode(t)
+ destPeer := wsPeer{
+ closing: make(chan struct{}),
+ sendBufferHighPrio: make(chan sendMessages, sendBufferLength),
+ sendBufferBulk: make(chan sendMessages, sendBufferLength),
+ conn: &nopConnSingleton,
+ }
+ node.addPeer(&destPeer)
+ node.Start()
+ defer node.Stop()
+
+ var target, counter uint64
+ // send messages to the peer that won't read them so they will sit in the sendQueue
+ for i := 0; i < 10; i++ {
+ randInt := crypto.RandUint64()%(128) + 1
+ target += randInt
+ topic := MakeTopic("val", []byte("blah"))
+ callback := func() {
+ counter += randInt
+ }
+ msg := IncomingMessage{Sender: node.peers[0], Tag: protocol.UniEnsBlockReqTag}
+ destPeer.Respond(context.Background(), msg, OutgoingMessage{OnRelease: callback, Topics: Topics{topic}})
+ }
+ require.Len(t, destPeer.sendBufferBulk, 10)
+ require.Zero(t, counter)
+ require.Positive(t, target)
+ // close the peer to trigger draining of the queue callbacks
+ destPeer.Close(time.Now().Add(time.Second))
+
+ require.Eventually(t,
+ func() bool { return target == counter },
+ 2*time.Second,
+ 50*time.Millisecond,
+ )
+}
diff --git a/network/wsPeer.go b/network/wsPeer.go
index accd06cf9..1c9aad3db 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -41,7 +41,8 @@ import (
"github.com/algorand/go-algorand/util/metrics"
)
-const maxMessageLength = 6 * 1024 * 1024 // Currently the biggest message is VB vote bundles. TODO: per message type size limit?
+// MaxMessageLength is the maximum length of a message that can be sent or received, exported to be used in the node.TestMaxSizesCorrect test
+const MaxMessageLength = 6 * 1024 * 1024 // Currently the biggest message is VB vote bundles.
const averageMessageLength = 2 * 1024 // Most of the messages are smaller than this size, which makes it into a good base allocation.
// This parameter controls how many messages from a single peer can be
@@ -52,9 +53,6 @@ const msgsInReadBufferPerPeer = 10
var tagStringList []string
-// allowCustomTags is set by tests to allow non-protocol-defined message tags. It is false in non-test code.
-var allowCustomTags bool
-
func init() {
tagStringList = make([]string, len(protocol.TagList))
for i, t := range protocol.TagList {
@@ -168,6 +166,7 @@ const disconnectRequestReceived disconnectReason = "DisconnectRequest"
const disconnectStaleWrite disconnectReason = "DisconnectStaleWrite"
const disconnectDuplicateConnection disconnectReason = "DuplicateConnection"
const disconnectBadIdentityData disconnectReason = "BadIdentityData"
+const disconnectUnexpectedTopicResp disconnectReason = "UnexpectedTopicResp"
// Response is the structure holding the response from the server
type Response struct {
@@ -176,6 +175,9 @@ type Response struct {
type sendMessages struct {
msgs []sendMessage
+
+ // onRelease function is called when the message is released either by being sent or discarded.
+ onRelease func()
}
type wsPeer struct {
@@ -185,6 +187,10 @@ type wsPeer struct {
// we want this to be a 64-bit aligned for atomics support on 32bit platforms.
lastPacketTime int64
+ // outstandingTopicRequests is an atomic counter for the number of outstanding block requests we've made out to this peer
+ // if a peer sends more blocks than we've requested, we'll disconnect from it.
+ outstandingTopicRequests int64
+
// intermittentOutgoingMessageEnqueueTime contains the UnixNano of the message's enqueue time that is currently being written to the
// peer, or zero if no message is being written.
intermittentOutgoingMessageEnqueueTime int64
@@ -309,7 +315,7 @@ type UnicastPeer interface {
// Version returns the matching version from network.SupportedProtocolVersions
Version() string
Request(ctx context.Context, tag Tag, topics Topics) (resp *Response, e error)
- Respond(ctx context.Context, reqMsg IncomingMessage, topics Topics) (e error)
+ Respond(ctx context.Context, reqMsg IncomingMessage, outMsg OutgoingMessage) (e error)
}
// TCPInfoUnicastPeer exposes information about the underlying connection if available on the platform
@@ -385,7 +391,7 @@ func (wp *wsPeer) GetUnderlyingConnTCPInfo() (*util.TCPInfo, error) {
}
// Respond sends the response of a request message
-func (wp *wsPeer) Respond(ctx context.Context, reqMsg IncomingMessage, responseTopics Topics) (e error) {
+func (wp *wsPeer) Respond(ctx context.Context, reqMsg IncomingMessage, outMsg OutgoingMessage) (e error) {
// Get the hash/key of the request message
requestHash := hashTopics(reqMsg.Data)
@@ -393,7 +399,7 @@ func (wp *wsPeer) Respond(ctx context.Context, reqMsg IncomingMessage, responseT
// Add the request hash
requestHashData := make([]byte, binary.MaxVarintLen64)
binary.PutUvarint(requestHashData, requestHash)
- responseTopics = append(responseTopics, Topic{key: requestHashKey, data: requestHashData})
+ responseTopics := append(outMsg.Topics, Topic{key: requestHashKey, data: requestHashData})
// Serialize the topics
serializedMsg := responseTopics.MarshallTopics()
@@ -408,11 +414,17 @@ func (wp *wsPeer) Respond(ctx context.Context, reqMsg IncomingMessage, responseT
}
select {
- case wp.sendBufferBulk <- sendMessages{msgs: msg}:
+ case wp.sendBufferBulk <- sendMessages{msgs: msg, onRelease: outMsg.OnRelease}:
case <-wp.closing:
+ if outMsg.OnRelease != nil {
+ outMsg.OnRelease()
+ }
wp.net.log.Debugf("peer closing %s", wp.conn.RemoteAddr().String())
return
case <-ctx.Done():
+ if outMsg.OnRelease != nil {
+ outMsg.OnRelease()
+ }
return ctx.Err()
}
return nil
@@ -472,8 +484,8 @@ func (wp *wsPeer) readLoop() {
defer func() {
wp.readLoopCleanup(cleanupCloseError)
}()
- wp.conn.SetReadLimit(maxMessageLength)
- slurper := MakeLimitedReaderSlurper(averageMessageLength, maxMessageLength)
+ wp.conn.SetReadLimit(MaxMessageLength)
+ slurper := MakeLimitedReaderSlurper(averageMessageLength, MaxMessageLength)
dataConverter := makeWsPeerMsgDataConverter(wp)
for {
@@ -505,7 +517,31 @@ func (wp *wsPeer) readLoop() {
return
}
msg.Tag = Tag(string(tag[:]))
- slurper.Reset()
+
+ // Skip the message if it's a response to a request we didn't make or has timed out
+ if msg.Tag == protocol.TopicMsgRespTag && wp.lenResponseChannels() == 0 {
+ atomic.AddInt64(&wp.outstandingTopicRequests, -1)
+
+ // This peers has sent us more responses than we have requested. This is a protocol violation and we should disconnect.
+ if atomic.LoadInt64(&wp.outstandingTopicRequests) < 0 {
+ wp.net.log.Errorf("wsPeer readloop: peer %s sent TS response without a request", wp.conn.RemoteAddr().String())
+ networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "unrequestedTS"})
+ cleanupCloseError = disconnectUnexpectedTopicResp
+ return
+ }
+ var n int64
+ // Peer sent us a response to a request we made but we've already timed out -- discard
+ n, err = io.Copy(io.Discard, reader)
+ if err != nil {
+ wp.net.log.Infof("wsPeer readloop: could not discard timed-out TS message from %s : %s", wp.conn.RemoteAddr().String(), err)
+ wp.reportReadErr(err)
+ return
+ }
+ wp.net.log.Warnf("wsPeer readLoop: received a TS response for a stale request from %s. %d bytes discarded", wp.conn.RemoteAddr().String(), n)
+ continue
+ }
+
+ slurper.Reset(uint64(msg.Tag.MaxMessageSize()))
err = slurper.Read(reader)
if err != nil {
wp.reportReadErr(err)
@@ -538,11 +574,16 @@ func (wp *wsPeer) readLoop() {
case protocol.MsgOfInterestTag:
// try to decode the message-of-interest
atomic.AddUint64(&wp.miMessageCount, 1)
- if wp.handleMessageOfInterest(msg) {
+ if close, reason := wp.handleMessageOfInterest(msg); close {
+ cleanupCloseError = reason
+ if reason == disconnectBadData {
+ networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "protocol"})
+ }
return
}
continue
case protocol.TopicMsgRespTag: // Handle Topic message
+ atomic.AddInt64(&wp.outstandingTopicRequests, -1)
topics, err := UnmarshallTopics(msg.Data)
if err != nil {
wp.net.log.Warnf("wsPeer readLoop: could not read the message from: %s %s", wp.conn.RemoteAddr().String(), err)
@@ -583,9 +624,8 @@ func (wp *wsPeer) readLoop() {
default: // unrecognized tag
unknownProtocolTagMessagesTotal.Inc(nil)
atomic.AddUint64(&wp.unkMessageCount, 1)
- if !allowCustomTags {
- continue // drop message, skip adding it to queue
- }
+ continue // drop message, skip adding it to queue
+ // TODO: should disconnect here?
}
if len(msg.Data) > 0 && wp.incomingMsgFilter != nil && dedupSafeTag(msg.Tag) {
if wp.incomingMsgFilter.CheckIncomingMessage(msg.Tag, msg.Data, true, true) {
@@ -616,13 +656,14 @@ func (wp *wsPeer) readLoop() {
}
}
-func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (shutdown bool) {
- shutdown = false
+func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (close bool, reason disconnectReason) {
+ close = false
+ reason = disconnectReasonNone
// decode the message, and ensure it's a valid message.
msgTagsMap, err := unmarshallMessageOfInterest(msg.Data)
if err != nil {
wp.net.log.Warnf("wsPeer handleMessageOfInterest: could not unmarshall message from: %s %v", wp.conn.RemoteAddr().String(), err)
- return
+ return true, disconnectBadData
}
msgs := make([]sendMessage, 1, 1)
msgs[0] = sendMessage{
@@ -641,7 +682,7 @@ func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (shutdown bool) {
return
case <-wp.closing:
wp.net.log.Debugf("peer closing %s", wp.conn.RemoteAddr().String())
- shutdown = true
+ return true, disconnectReasonNone
default:
}
@@ -650,7 +691,7 @@ func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (shutdown bool) {
case wp.sendBufferBulk <- sm:
case <-wp.closing:
wp.net.log.Debugf("peer closing %s", wp.conn.RemoteAddr().String())
- shutdown = true
+ return true, disconnectReasonNone
}
return
}
@@ -683,6 +724,9 @@ func (wp *wsPeer) handleFilterMessage(msg IncomingMessage) {
}
func (wp *wsPeer) writeLoopSend(msgs sendMessages) disconnectReason {
+ if msgs.onRelease != nil {
+ defer msgs.onRelease()
+ }
for _, msg := range msgs.msgs {
select {
case <-msg.ctx.Done():
@@ -700,8 +744,8 @@ func (wp *wsPeer) writeLoopSend(msgs sendMessages) disconnectReason {
}
func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason {
- if len(msg.data) > maxMessageLength {
- wp.net.log.Errorf("trying to send a message longer than we would receive: %d > %d tag=%s", len(msg.data), maxMessageLength, string(msg.data[0:2]))
+ if len(msg.data) > MaxMessageLength {
+ wp.net.log.Errorf("trying to send a message longer than we would receive: %d > %d tag=%s", len(msg.data), MaxMessageLength, string(msg.data[0:2]))
// just drop it, don't break the connection
return disconnectReasonNone
}
@@ -832,7 +876,8 @@ func (wp *wsPeer) writeNonBlockMsgs(ctx context.Context, data [][]byte, highPrio
return false
}
-const pingLength = 8
+// PingLength is the fixed length of ping message, exported to be used in the node.TestMaxSizesCorrect test
+const PingLength = 8
const maxPingWait = 60 * time.Second
// sendPing sends a ping block to the peer.
@@ -846,7 +891,7 @@ func (wp *wsPeer) sendPing() bool {
}
tagBytes := []byte(protocol.PingTag)
- mbytes := make([]byte, len(tagBytes)+pingLength)
+ mbytes := make([]byte, len(tagBytes)+PingLength)
copy(mbytes, tagBytes)
crypto.RandBytes(mbytes[len(tagBytes):])
wp.pingData = mbytes[len(tagBytes):]
@@ -890,6 +935,21 @@ func (wp *wsPeer) Close(deadline time.Time) {
wp.net.log.Infof("failed to CloseWithoutFlush to connection for %s", wp.conn.RemoteAddr().String())
}
}
+
+ // We need to loop through all of the messages with callbacks still in the send queue and call them
+ // to ensure that state of counters such as wsBlockBytesUsed is correct.
+L:
+ for {
+ select {
+ case msgs := <-wp.sendBufferBulk:
+ if msgs.onRelease != nil {
+ msgs.onRelease()
+ }
+ default:
+ break L
+ }
+
+ }
// now call all registered closers
for _, f := range wp.closers {
f()
@@ -923,12 +983,20 @@ func (wp *wsPeer) getRequestNonce() []byte {
return buf
}
+// MakeNonceTopic returns a topic with the nonce as the data
+// exported for testing purposes
+func MakeNonceTopic(nonce uint64) Topic {
+ buf := make([]byte, binary.MaxVarintLen64)
+ binary.PutUvarint(buf, nonce)
+ return Topic{key: "nonce", data: buf}
+}
+
// Request submits the request to the server, waits for a response
func (wp *wsPeer) Request(ctx context.Context, tag Tag, topics Topics) (resp *Response, e error) {
- // Add nonce as a topic
- nonce := wp.getRequestNonce()
- topics = append(topics, Topic{key: "nonce", data: nonce})
+ // Add nonce, stored on the wsPeer as the topic
+ nonceTopic := MakeNonceTopic(atomic.AddUint64(&wp.requestNonce, 1))
+ topics = append(topics, nonceTopic)
// serialize the topics
serializedMsg := topics.MarshallTopics()
@@ -949,6 +1017,7 @@ func (wp *wsPeer) Request(ctx context.Context, tag Tag, topics Topics) (resp *Re
ctx: context.Background()}
select {
case wp.sendBufferBulk <- sendMessages{msgs: msg}:
+ atomic.AddInt64(&wp.outstandingTopicRequests, 1)
case <-wp.closing:
e = fmt.Errorf("peer closing %s", wp.conn.RemoteAddr().String())
return
@@ -976,6 +1045,12 @@ func (wp *wsPeer) makeResponseChannel(key uint64) (responseChannel chan *Respons
return newChan
}
+func (wp *wsPeer) lenResponseChannels() int {
+ wp.responseChannelsMutex.Lock()
+ defer wp.responseChannelsMutex.Unlock()
+ return len(wp.responseChannels)
+}
+
// getAndRemoveResponseChannel returns the channel and deletes the channel from the map
func (wp *wsPeer) getAndRemoveResponseChannel(key uint64) (respChan chan *Response, found bool) {
wp.responseChannelsMutex.Lock()
diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go
index ceb2121aa..4853b95e3 100644
--- a/network/wsPeer_test.go
+++ b/network/wsPeer_test.go
@@ -270,6 +270,9 @@ func getProtocolTags(t *testing.T) []string {
for _, spec := range genDecl.Specs {
if valueSpec, ok := spec.(*ast.ValueSpec); ok {
for _, n := range valueSpec.Names {
+ if strings.HasSuffix(n.Name, "MaxSize") || n.Name == "TagLength" {
+ continue
+ }
declaredTags = append(declaredTags, n.Name)
}
}
diff --git a/node/error.go b/node/error.go
index d177f0c87..f3c3e05ea 100644
--- a/node/error.go
+++ b/node/error.go
@@ -62,3 +62,26 @@ func (e *CatchpointUnableToStartError) Error() string {
e.catchpointRequested,
e.catchpointRunning)
}
+
+// StartCatchpointError is returned when the catchpoint service cannot start up.
+type StartCatchpointError struct {
+ catchpointRequested string
+ err error
+}
+
+// MakeStartCatchpointError creates a StartCatchpointError for a given catchpoint
+func MakeStartCatchpointError(catchpointRequested string, err error) *StartCatchpointError {
+ return &StartCatchpointError{
+ catchpointRequested: catchpointRequested,
+ err: err,
+ }
+}
+
+// Error satisfies the builtin interface `error`
+func (e *StartCatchpointError) Error() string {
+ return fmt.Sprintf(
+ "unable to start catchpoint service for requested catchpoint %s: %s",
+ e.catchpointRequested,
+ e.err,
+ )
+}
diff --git a/node/follower_node.go b/node/follower_node.go
index ae770c711..5ac3ca6c0 100644
--- a/node/follower_node.go
+++ b/node/follower_node.go
@@ -186,7 +186,7 @@ func (node *AlgorandFollowerNode) Start() {
if node.catchpointCatchupService != nil {
startNetwork()
- node.catchpointCatchupService.Start(node.ctx)
+ _ = node.catchpointCatchupService.Start(node.ctx)
} else {
node.catchupService.Start()
node.blockService.Start()
@@ -349,7 +349,11 @@ func (node *AlgorandFollowerNode) StartCatchup(catchpoint string) error {
node.log.Warnf("unable to create catchpoint catchup service : %v", err)
return err
}
- node.catchpointCatchupService.Start(node.ctx)
+ err = node.catchpointCatchupService.Start(node.ctx)
+ if err != nil {
+ node.log.Warnf(err.Error())
+ return MakeStartCatchpointError(catchpoint, err)
+ }
node.log.Infof("starting catching up toward catchpoint %s", catchpoint)
return nil
}
diff --git a/node/follower_node_test.go b/node/follower_node_test.go
index 17dbd52ca..192b333be 100644
--- a/node/follower_node_test.go
+++ b/node/follower_node_test.go
@@ -47,13 +47,13 @@ func followNodeDefaultGenesis() bookkeeping.Genesis {
Allocation: []bookkeeping.GenesisAllocation{
{
Address: poolAddr.String(),
- State: basics.AccountData{
+ State: bookkeeping.GenesisAccountData{
MicroAlgos: basics.MicroAlgos{Raw: 1000000000},
},
},
{
Address: sinkAddr.String(),
- State: basics.AccountData{
+ State: bookkeeping.GenesisAccountData{
MicroAlgos: basics.MicroAlgos{Raw: 1000000},
},
},
diff --git a/node/indexer/db.go b/node/indexer/db.go
deleted file mode 100644
index a46106af0..000000000
--- a/node/indexer/db.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright (C) 2019-2023 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package indexer
-
-import (
- "context"
- "database/sql"
- "fmt"
-
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/util/db"
-)
-
-const (
- dbName = "indexer.sqlite"
- maxRows = 100
-)
-
-var schema = `
- CREATE TABLE IF NOT EXISTS transactions(
- txid CHAR(52) PRIMARY KEY NOT NULL,
- from_addr CHAR(58) DEFAULT NULL,
- to_addr CHAR(58) DEFAULT NULL,
- round INTEGER DEFAULT NULL,
- created_at INTEGER
- );
-
- CREATE TABLE IF NOT EXISTS params(
- k CHAR(15) PRIMARY KEY DEFAULT NULL,
- v INTEGER DEFAULT NULL,
- UNIQUE (k)
- );
-
- INSERT OR IGNORE INTO params (k, v) VALUES ('maxRound', 1);
-
- CREATE INDEX IF NOT EXISTS idx ON transactions (
- created_at DESC,
- from_addr,
- to_addr
- );
-`
-
-// Transaction represents a transaction in the system
-type Transaction struct {
- TXID string
- From string `db:"from_addr_r"`
- To string `db:"to_addr_r"`
- Round uint32
- CreatedAt uint32 `db:"created_at"`
-}
-
-// DB is a the db access layer for Indexer
-type DB struct {
- // DB Accessors
- dbr db.Accessor
- dbw db.Accessor
-
- // DBPath holds the db file path
- DBPath string
-}
-
-// MakeIndexerDB takes the db path, a bool for inMemory and returns the IndexerDB control obj
-func MakeIndexerDB(dbPath string, inMemory bool) (*DB, error) {
- idb := &DB{}
-
- idb.DBPath = dbPath + "/" + dbName
-
- dbr, err := db.MakeAccessor(idb.DBPath, true, inMemory)
- if err != nil {
- return &DB{}, err
- }
- idb.dbr = dbr
-
- dbw, err := db.MakeAccessor(idb.DBPath, false, inMemory)
- if err != nil {
- return &DB{}, err
- }
- idb.dbw = dbw
-
- _, err = dbw.Handle.Exec(schema)
- if err != nil {
- return &DB{}, err
- }
-
- return idb, nil
-}
-
-// AddBlock takes an Algorand block and stores its transactions in the DB.
-func (idb *DB) AddBlock(b bookkeeping.Block) error {
- err := idb.dbw.Atomic(func(ctx context.Context, tx *sql.Tx) error {
-
- // Get last block
- rnd, err := idb.MaxRound()
- if err != nil {
- return err
- }
-
- if uint64(b.Round()) != rnd+1 {
- return fmt.Errorf("tryign to add a future block %d, where the last one is %d", b.Round(), rnd)
- }
-
- stmt, err := tx.Prepare("INSERT INTO transactions (txid, from_addr, to_addr, round, created_at) VALUES($1, $2, $3, $4, $5);")
- if err != nil {
- return err
- }
- defer stmt.Close()
-
- payset, err := b.DecodePaysetFlat()
- if err != nil {
- return err
- }
- for _, txad := range payset {
- txn := txad.SignedTxn
- _, err = stmt.Exec(txn.ID().String(), txn.Txn.Sender.String(), txn.Txn.GetReceiverAddress().String(), b.Round(), b.TimeStamp)
- if err != nil {
- return err
- }
- }
-
- stmt2, err := tx.Prepare("UPDATE params SET v = $1 WHERE k = 'maxRound';")
- if err != nil {
- return err
- }
- defer stmt2.Close()
-
- _, err = stmt2.Exec(b.Round())
- if err != nil {
- return err
- }
-
- return nil
- })
-
- return err
-}
-
-// GetTransactionByID takes a transaction ID and returns its transaction record
-func (idb *DB) GetTransactionByID(txid string) (Transaction, error) {
- query := `
- SELECT
- txid,
- from_addr,
- to_addr,
- round,
- created_at
- FROM
- transactions
- WHERE
- txid = $1
- `
-
- var txn Transaction
- if err := idb.dbr.Handle.QueryRow(query, txid).Scan(&txn.TXID, &txn.From, &txn.To, &txn.Round, &txn.CreatedAt); err != nil {
- return Transaction{}, err
- }
-
- return txn, nil
-}
-
-// GetTransactionsRoundsByAddr takes an address and returns all its transaction rounds records
-// if top is 0, it will return 25 transactions by default
-func (idb *DB) GetTransactionsRoundsByAddr(addr string, top uint64) ([]uint64, error) {
- query := `
- SELECT DISTINCT
- round
- FROM
- transactions
- WHERE
- from_addr = $1 OR to_addr = $1
- ORDER BY created_at DESC
- LIMIT $2;
- `
-
- // limit
- if top == 0 {
- top = maxRows
- }
-
- var rounds []uint64
- var rnd uint64
- rows, err := idb.dbr.Handle.Query(query, addr, top)
- if err != nil {
- return nil, err
- }
-
- defer rows.Close()
-
- for rows.Next() {
- err := rows.Scan(&rnd)
- if err != nil {
- return nil, err
- }
- rounds = append(rounds, rnd)
- }
-
- err = rows.Err()
-
- if err != nil {
- return nil, err
- }
-
- return rounds, nil
-}
-
-// GetTransactionsRoundsByAddrAndDate takes an address and a date range (as seconds from epoch) and returns all
-// of its transaction rounds records.
-// if top is 0, it will return 100 transactions by default
-func (idb *DB) GetTransactionsRoundsByAddrAndDate(addr string, top uint64, from, to int64) ([]uint64, error) {
- query := `
- SELECT DISTINCT
- round
- FROM
- transactions
- WHERE
- created_at > $1 AND created_at < $2
- AND
- (from_addr = $3 OR to_addr = $3)
- LIMIT $4;
- `
-
- // limit
- if top == 0 {
- top = maxRows
- }
-
- var rounds []uint64
- var rnd uint64
- rows, err := idb.dbr.Handle.Query(query, from, to, addr, top)
- if err != nil {
- return nil, err
- }
- defer rows.Close()
-
- for rows.Next() {
- err := rows.Scan(&rnd)
- if err != nil {
- return nil, err
- }
-
- rounds = append(rounds, rnd)
- }
-
- err = rows.Err()
-
- if err != nil {
- return nil, err
- }
-
- return rounds, nil
-}
-
-// MaxRound returns the latest block in the DB
-func (idb *DB) MaxRound() (uint64, error) {
- var rnd uint64
- if err := idb.dbr.Handle.QueryRow("SELECT v from params where k = 'maxRound'").Scan(&rnd); err != nil {
- return 0, err
- }
- return rnd, nil
-}
-
-// Close closes the db connections
-func (idb *DB) Close() {
- idb.dbw.Close()
- idb.dbr.Close()
-}
diff --git a/node/indexer/indexer.go b/node/indexer/indexer.go
deleted file mode 100644
index 064685fe7..000000000
--- a/node/indexer/indexer.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (C) 2019-2023 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package indexer
-
-import (
- "context"
- "time"
-
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/logging"
-)
-
-// Ledger interface to make testing easier
-type Ledger interface {
- Block(rnd basics.Round) (blk bookkeeping.Block, err error)
- Wait(r basics.Round) chan struct{}
-}
-
-// Indexer keeps track of transactions and their senders
-// to enable quick retrieval.
-type Indexer struct {
- IDB *DB
-
- l Ledger
-
- ctx context.Context
- cancelCtx context.CancelFunc
-}
-
-// MakeIndexer makes a new indexer.
-func MakeIndexer(dataDir string, ledger Ledger, inMemory bool) (*Indexer, error) {
- orm, err := MakeIndexerDB(dataDir, inMemory)
- if err != nil {
- return &Indexer{}, err
- }
-
- ctx, cancel := context.WithCancel(context.Background())
-
- return &Indexer{
- IDB: orm,
- l: ledger,
- ctx: ctx,
- cancelCtx: cancel,
- }, nil
-}
-
-// GetRoundByTXID takes a transactionID an returns its round number
-func (idx *Indexer) GetRoundByTXID(txID string) (uint64, error) {
- txn, err := idx.IDB.GetTransactionByID(txID)
- if err != nil {
- return 0, err
- }
- return uint64(txn.Round), nil
-}
-
-// GetRoundsByAddressAndDate takes an address, date range and maximum number of txns to return , and returns all
-// blocks that contain the relevant transaction. if top is 0, it defaults to 100.
-func (idx *Indexer) GetRoundsByAddressAndDate(addr string, top uint64, from, to int64) ([]uint64, error) {
- rounds, err := idx.IDB.GetTransactionsRoundsByAddrAndDate(addr, top, from, to)
- if err != nil {
- return nil, err
- }
- return rounds, nil
-}
-
-// GetRoundsByAddress takes an address and the number of transactions to return
-// and returns all blocks that contain transaction where the address was the
-// sender or the receiver.
-func (idx *Indexer) GetRoundsByAddress(addr string, top uint64) ([]uint64, error) {
- rounds, err := idx.IDB.GetTransactionsRoundsByAddr(addr, top)
- if err != nil {
- return nil, err
- }
- return rounds, nil
-}
-
-// NewBlock takes a block and updates the DB
-// If the block exists, return nil.the block must be the next block
-func (idx *Indexer) NewBlock(b bookkeeping.Block) error {
- // Get last block
- rnd, err := idx.LastBlock()
- if err != nil {
- return err
- }
-
- if b.Round() <= rnd {
- return nil
- }
-
- err = idx.IDB.AddBlock(b)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Start starts the indexer
-func (idx *Indexer) Start() error {
- round, err := idx.LastBlock()
- if err != nil {
- return err
- }
-
- go idx.update(round)
-
- return nil
-}
-
-func (idx *Indexer) update(round basics.Round) {
- for {
- select {
- // Wait on the block
- case <-idx.l.Wait(round + 1):
- b, err := idx.l.Block(round + 1)
- if err != nil {
- logging.Base().Errorf("failed fetching block %d, trying again in 0.5 seconds", round+1)
- time.Sleep(time.Millisecond * 500)
- } else {
- err = idx.NewBlock(b)
- if err != nil {
- logging.Base().Errorf("failed write block %d, trying again in 0.5 seconds", round+1)
- time.Sleep(time.Millisecond * 500)
- } else {
- round++
- }
- }
-
- case <-idx.ctx.Done():
- return
- }
- }
-}
-
-// LastBlock returns the last block the indexer is aware of
-func (idx *Indexer) LastBlock() (basics.Round, error) {
- rnd, err := idx.IDB.MaxRound()
- if err != nil {
- return 0, err
- }
- return basics.Round(rnd), nil
-}
-
-// Shutdown closes the indexer
-func (idx *Indexer) Shutdown() {
- idx.cancelCtx()
- idx.IDB.Close()
-}
diff --git a/node/indexer/indexer_test.go b/node/indexer/indexer_test.go
deleted file mode 100644
index ff1e398a4..000000000
--- a/node/indexer/indexer_test.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright (C) 2019-2023 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package indexer
-
-import (
- "math/rand"
- "os"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
- "github.com/stretchr/testify/suite"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/partitiontest"
-)
-
-const testGenesisID string = "foo"
-
-var genesisHash = crypto.Digest{0x1, 0x2, 0x3}
-
-type IndexSuite struct {
- suite.Suite
- idx *Indexer
-
- txns []transactions.SignedTxn
- secrets []*crypto.SignatureSecrets
- addrs []basics.Address
-}
-
-func (s *IndexSuite) SetupSuite() {
- var err error
- s.idx, err = MakeIndexer(".", &TestLedger{}, true)
- require.NoError(s.T(), err)
-
- numOfTransactions := 5000
- numOfAccounts := 10
- numOfBlocks := 10
-
- require.Equal(s.T(), 0, numOfTransactions%numOfBlocks, "Number of transaction must be "+
- "divisible by the number of blocks")
-
- _, s.txns, s.secrets, s.addrs = generateTestObjects(numOfTransactions, numOfAccounts)
-
- // Gen some simple blocks
- for i := 0; i < numOfBlocks; i++ {
- var txnEnc []transactions.SignedTxnInBlock
- b := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(uint64(i + 2)),
- TimeStamp: time.Now().Unix(),
- GenesisID: testGenesisID,
- GenesisHash: genesisHash,
- UpgradeState: bookkeeping.UpgradeState{
- CurrentProtocol: protocol.ConsensusFuture,
- },
- },
- }
-
- chunkSize := numOfTransactions / numOfBlocks
- for t := i * chunkSize; t < (i+1)*chunkSize; t++ {
- txid, err := b.EncodeSignedTxn(s.txns[t], transactions.ApplyData{})
- require.NoError(s.T(), err)
- txnEnc = append(txnEnc, txid)
- }
-
- b.Payset = txnEnc
- err = s.idx.NewBlock(b)
- require.NoError(s.T(), err)
-
- r, err := s.idx.LastBlock()
- require.NoError(s.T(), err)
- require.Equal(s.T(), basics.Round(i+2), r)
- }
-}
-
-func (s *IndexSuite) TearDownSuite() {
- s.idx.Shutdown()
- err := os.RemoveAll(s.idx.IDB.DBPath)
- require.NoError(s.T(), err)
-}
-
-func (s *IndexSuite) TestIndexer_GetRoundByTXID() {
- txID := s.txns[0].ID().String()
-
- _, err := s.idx.GetRoundByTXID(txID)
-
- require.NoError(s.T(), err)
-
-}
-
-func (s *IndexSuite) TestIndexer_GetRoundsByAddress() {
- var count int
-
- res, err := s.idx.GetRoundsByAddress(s.addrs[0].GetUserAddress(), uint64(count))
- require.NoError(s.T(), err)
-
- // Should be equal to the number of blocks.
- require.Equal(s.T(), 10, len(res))
-}
-
-func (s *IndexSuite) TestIndexer_DuplicateRounds() {
- // Get Transactions (we're guaranteed to have more than one txn per address per block)
-
- res, err := s.idx.GetRoundsByAddress(s.addrs[0].String(), 100)
-
- require.NoError(s.T(), err)
-
- // Check for dups
- seen := make(map[uint64]bool)
- for _, b := range res {
- require.False(s.T(), seen[b])
- seen[b] = true
- }
-}
-
-func (s *IndexSuite) TestIndexer_Asset() {
- query := "SELECT txid from transactions where (from_addr = $1 OR to_addr = $1)"
- rows, err := s.idx.IDB.dbr.Handle.Query(query, s.addrs[0].String())
- require.NoError(s.T(), err)
- defer rows.Close()
-
- txids := make(map[string]bool, 0)
- var txid string
- for rows.Next() {
- err := rows.Scan(&txid)
- require.NoError(s.T(), err)
- txids[txid] = true
- }
-
- // make sure all txns are in list
- for _, txn := range s.txns {
- if txn.Txn.Type == protocol.AssetTransferTx {
- if txn.Txn.Sender == s.addrs[0] || txn.Txn.AssetReceiver == s.addrs[0] {
- require.True(s.T(), txids[txn.ID().String()])
- }
- }
- }
-
-}
-
-func TestExampleTestSuite(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- suite.Run(t, new(IndexSuite))
-}
-
-func BenchmarkORM_AddTransactions(b *testing.B) {
- idx, _ := MakeIndexer(".", &TestLedger{}, false)
- _, txns, _, _ := generateTestObjects(5000, 100)
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- var txnEnc []transactions.SignedTxnInBlock
- blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(n),
- TimeStamp: time.Now().Unix(),
- },
- }
- for _, tx := range txns {
- txib, err := blk.EncodeSignedTxn(tx, transactions.ApplyData{})
- require.NoError(b, err)
- txnEnc = append(txnEnc, txib)
- }
- blk.Payset = txnEnc
- idx.NewBlock(blk)
- }
-}
-
-func BenchmarkORM_AddTransactions2(b *testing.B) {
- numTxn := 5000
- idx, _ := MakeIndexer(".", &TestLedger{}, false)
- _, txns, _, _ := generateTestObjects(numTxn, 100)
- b.ResetTimer()
-
- t := 5
- for n := 0; n < b.N; n++ {
- for i := 0; i < t; i++ {
- var txnEnc []transactions.SignedTxnInBlock
- blk := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: basics.Round(i),
- TimeStamp: time.Now().Unix(),
- },
- }
- bTxns := txns[numTxn/t*i : numTxn/t*(i+1)]
- for _, tx := range bTxns {
- txib, err := blk.EncodeSignedTxn(tx, transactions.ApplyData{})
- require.NoError(b, err)
- txnEnc = append(txnEnc, txib)
- }
- blk.Payset = txnEnc
- idx.NewBlock(blk)
- }
- }
-}
-
-func generateTestObjects(numTxs, numAccs int) ([]transactions.Transaction, []transactions.SignedTxn, []*crypto.SignatureSecrets, []basics.Address) {
- txs := make([]transactions.Transaction, numTxs)
- signed := make([]transactions.SignedTxn, numTxs)
- secrets := make([]*crypto.SignatureSecrets, numAccs)
- addresses := make([]basics.Address, numAccs)
-
- for i := 0; i < numAccs; i++ {
- secret := keypair()
- addr := basics.Address(secret.SignatureVerifier)
- secrets[i] = secret
- addresses[i] = addr
- }
-
- for i := 0; i < numTxs; i++ {
- s := rand.Intn(numAccs)
- r := rand.Intn(numAccs)
- a := rand.Intn(1000)
- f := config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee + uint64(rand.Intn(10))
- iss := 50 + rand.Intn(30)
- exp := iss + 10
-
- txs[i] = transactions.Transaction{
- Header: transactions.Header{
- Sender: addresses[s],
- Fee: basics.MicroAlgos{Raw: f},
- FirstValid: basics.Round(iss),
- LastValid: basics.Round(exp),
- GenesisID: testGenesisID,
- GenesisHash: genesisHash,
- },
- }
-
- // Create half assets and half payment
- if i%2 == 0 {
- txs[i].Type = protocol.PaymentTx
- txs[i].PaymentTxnFields = transactions.PaymentTxnFields{
- Receiver: addresses[r],
- Amount: basics.MicroAlgos{Raw: uint64(a)},
- }
- } else {
- txs[i].Type = protocol.AssetTransferTx
- txs[i].AssetTransferTxnFields = transactions.AssetTransferTxnFields{
- AssetReceiver: addresses[r],
- AssetAmount: uint64(a),
- XferAsset: basics.AssetIndex(uint64(rand.Intn(20000))),
- }
- }
- signed[i] = txs[i].Sign(secrets[s])
- }
-
- return txs, signed, secrets, addresses
-}
-
-func keypair() *crypto.SignatureSecrets {
- var seed crypto.Seed
- crypto.RandBytes(seed[:])
- s := crypto.GenerateSignatureSecrets(seed)
- return s
-}
-
-type TestLedger struct {
-}
-
-func (l *TestLedger) Block(rnd basics.Round) (blk bookkeeping.Block, err error) {
- return bookkeeping.Block{}, nil
-}
-
-func (l *TestLedger) Wait(r basics.Round) chan struct{} {
- return nil
-}
diff --git a/node/msgp_gen.go b/node/msgp_gen.go
index 69851024d..88c92b37c 100644
--- a/node/msgp_gen.go
+++ b/node/msgp_gen.go
@@ -4,6 +4,9 @@ package node
import (
"github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
)
// The following msgp objects are implemented in this file:
@@ -14,6 +17,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> NetPrioResponseMaxSize()
//
// netPrioResponseSigned
// |-----> (*) MarshalMsg
@@ -22,6 +26,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> NetPrioResponseSignedMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -66,6 +71,16 @@ func (z *netPrioResponse) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0001 > 0 {
zb0001--
+ var zb0003 int
+ zb0003, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Nonce")
+ return
+ }
+ if zb0003 > netPrioChallengeSize {
+ err = msgp.ErrOverflow(uint64(zb0003), uint64(netPrioChallengeSize))
+ return
+ }
(*z).Nonce, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Nonce")
@@ -96,6 +111,16 @@ func (z *netPrioResponse) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch string(field) {
case "Nonce":
+ var zb0004 int
+ zb0004, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Nonce")
+ return
+ }
+ if zb0004 > netPrioChallengeSize {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(netPrioChallengeSize))
+ return
+ }
(*z).Nonce, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Nonce")
@@ -130,6 +155,12 @@ func (z *netPrioResponse) MsgIsZero() bool {
return ((*z).Nonce == "")
}
+// MaxSize returns a maximum valid message size for this message type
+func NetPrioResponseMaxSize() (s int) {
+ s = 1 + 6 + msgp.StringPrefixSize + netPrioChallengeSize
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *netPrioResponseSigned) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -223,6 +254,16 @@ func (z *netPrioResponseSigned) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
if zb0003 > 0 {
zb0003--
+ var zb0005 int
+ zb0005, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Response", "struct-from-array", "Nonce")
+ return
+ }
+ if zb0005 > netPrioChallengeSize {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(netPrioChallengeSize))
+ return
+ }
(*z).Response.Nonce, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Response", "struct-from-array", "Nonce")
@@ -253,6 +294,16 @@ func (z *netPrioResponseSigned) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch string(field) {
case "Nonce":
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Response", "Nonce")
+ return
+ }
+ if zb0006 > netPrioChallengeSize {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(netPrioChallengeSize))
+ return
+ }
(*z).Response.Nonce, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Response", "Nonce")
@@ -316,25 +367,35 @@ func (z *netPrioResponseSigned) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch string(field) {
case "Response":
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Response")
return
}
- if zb0005 > 0 {
- zb0005--
+ if zb0007 > 0 {
+ zb0007--
+ var zb0009 int
+ zb0009, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Response", "struct-from-array", "Nonce")
+ return
+ }
+ if zb0009 > netPrioChallengeSize {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(netPrioChallengeSize))
+ return
+ }
(*z).Response.Nonce, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Response", "struct-from-array", "Nonce")
return
}
}
- if zb0005 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0005)
+ if zb0007 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0007)
if err != nil {
err = msgp.WrapError(err, "Response", "struct-from-array")
return
@@ -345,11 +406,11 @@ func (z *netPrioResponseSigned) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "Response")
return
}
- if zb0006 {
+ if zb0008 {
(*z).Response = netPrioResponse{}
}
- for zb0005 > 0 {
- zb0005--
+ for zb0007 > 0 {
+ zb0007--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, "Response")
@@ -357,6 +418,16 @@ func (z *netPrioResponseSigned) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch string(field) {
case "Nonce":
+ var zb0010 int
+ zb0010, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Response", "Nonce")
+ return
+ }
+ if zb0010 > netPrioChallengeSize {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(netPrioChallengeSize))
+ return
+ }
(*z).Response.Nonce, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Response", "Nonce")
@@ -417,3 +488,9 @@ func (z *netPrioResponseSigned) Msgsize() (s int) {
func (z *netPrioResponseSigned) MsgIsZero() bool {
return ((*z).Response.Nonce == "") && ((*z).Round.MsgIsZero()) && ((*z).Sender.MsgIsZero()) && ((*z).Sig.MsgIsZero())
}
+
+// MaxSize returns a maximum valid message size for this message type
+func NetPrioResponseSignedMaxSize() (s int) {
+ s = 1 + 9 + 1 + 6 + msgp.StringPrefixSize + netPrioChallengeSize + 6 + basics.RoundMaxSize() + 7 + basics.AddressMaxSize() + 4 + crypto.OneTimeSignatureMaxSize()
+ return
+}
diff --git a/node/netprio.go b/node/netprio.go
index 1bf6c17a1..8b1ab62f1 100644
--- a/node/netprio.go
+++ b/node/netprio.go
@@ -26,10 +26,12 @@ import (
"github.com/algorand/go-algorand/protocol"
)
+const netPrioChallengeSize = 32
+
type netPrioResponse struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
- Nonce string
+ Nonce string `codec:"Nonce,allocbound=netPrioChallengeSize"`
}
type netPrioResponseSigned struct {
@@ -47,7 +49,7 @@ func (npr netPrioResponse) ToBeHashed() (protocol.HashID, []byte) {
// NewPrioChallenge implements the network.NetPrioScheme interface
func (node *AlgorandFullNode) NewPrioChallenge() string {
- var rand [32]byte
+ var rand [netPrioChallengeSize]byte
crypto.RandBytes(rand[:])
return base64.StdEncoding.EncodeToString(rand[:])
}
diff --git a/node/node.go b/node/node.go
index 3ff616da6..b3b508e91 100644
--- a/node/node.go
+++ b/node/node.go
@@ -47,7 +47,6 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/network/messagetracer"
- "github.com/algorand/go-algorand/node/indexer"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/rpcs"
"github.com/algorand/go-algorand/stateproof"
@@ -128,8 +127,6 @@ type AlgorandFullNode struct {
ledgerService *rpcs.LedgerService
txPoolSyncerService *rpcs.TxSyncer
- indexer *indexer.Indexer
-
rootDir string
genesisID string
genesisHash crypto.Digest
@@ -244,15 +241,6 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
return nil, err
}
- // Indexer setup
- if cfg.IsIndexerActive && cfg.Archival {
- node.indexer, err = indexer.MakeIndexer(genesisDir, node.ledger, false)
- if err != nil {
- logging.Base().Errorf("failed to make indexer - %v", err)
- return nil, err
- }
- }
-
node.blockService = rpcs.MakeBlockService(node.log, cfg, node.ledger, p2pNode, node.genesisID)
node.ledgerService = rpcs.MakeLedgerService(cfg, node.ledger, p2pNode, node.genesisID)
rpcs.RegisterTxService(node.transactionPool, p2pNode, node.genesisID, cfg.TxPoolSize, cfg.TxSyncServeResponseSize)
@@ -369,18 +357,6 @@ func (node *AlgorandFullNode) Start() {
node.txHandler.Start()
node.stateProofWorker.Start()
startNetwork()
- // start indexer
- if idx, err := node.Indexer(); err == nil {
- err := idx.Start()
- if err != nil {
- node.log.Errorf("indexer failed to start, turning it off - %v", err)
- node.config.IsIndexerActive = false
- } else {
- node.log.Info("Indexer was started successfully")
- }
- } else {
- node.log.Infof("Indexer is not available - %v", err)
- }
node.startMonitoringRoutines()
}
@@ -442,9 +418,6 @@ func (node *AlgorandFullNode) Stop() {
node.lowPriorityCryptoVerificationPool.Shutdown()
node.cryptoPool.Shutdown()
node.cancelCtx()
- if node.indexer != nil {
- node.indexer.Shutdown()
- }
}
// note: unlike the other two functions, this accepts a whole filename
@@ -566,7 +539,7 @@ func (node *AlgorandFullNode) broadcastSignedTxGroup(txgroup []transactions.Sign
// Simulate speculatively runs a transaction group against the current
// blockchain state and returns the effects and/or errors that would result.
func (node *AlgorandFullNode) Simulate(request simulation.Request) (result simulation.Result, err error) {
- simulator := simulation.MakeSimulator(node.ledger)
+ simulator := simulation.MakeSimulator(node.ledger, node.config.EnableDeveloperAPI)
return simulator.Simulate(request)
}
@@ -1137,14 +1110,6 @@ func (node *AlgorandFullNode) Uint64() uint64 {
return crypto.RandUint64()
}
-// Indexer returns a pointer to nodes indexer
-func (node *AlgorandFullNode) Indexer() (*indexer.Indexer, error) {
- if node.indexer != nil && node.config.IsIndexerActive {
- return node.indexer, nil
- }
- return nil, fmt.Errorf("indexer is not active")
-}
-
// GetTransactionByID gets transaction by ID
// this function is intended to be called externally via the REST api interface.
func (node *AlgorandFullNode) GetTransactionByID(txid transactions.Txid, rnd basics.Round) (TxnWithStatus, error) {
@@ -1164,8 +1129,8 @@ func (node *AlgorandFullNode) GetTransactionByID(txid transactions.Txid, rnd bas
func (node *AlgorandFullNode) StartCatchup(catchpoint string) error {
node.mu.Lock()
defer node.mu.Unlock()
- if node.indexer != nil {
- return fmt.Errorf("catching up using a catchpoint is not supported on indexer-enabled nodes")
+ if node.config.Archival {
+ return fmt.Errorf("catching up using a catchpoint is not supported on archive nodes")
}
if node.catchpointCatchupService != nil {
stats := node.catchpointCatchupService.GetStatistics()
@@ -1182,7 +1147,11 @@ func (node *AlgorandFullNode) StartCatchup(catchpoint string) error {
node.log.Warnf("unable to create catchpoint catchup service : %v", err)
return err
}
- node.catchpointCatchupService.Start(node.ctx)
+ err = node.catchpointCatchupService.Start(node.ctx)
+ if err != nil {
+ node.log.Warnf(err.Error())
+ return MakeStartCatchpointError(catchpoint, err)
+ }
node.log.Infof("starting catching up toward catchpoint %s", catchpoint)
return nil
}
@@ -1257,19 +1226,6 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo
node.txHandler.Start()
node.stateProofWorker.Start()
- // start indexer
- if idx, err := node.Indexer(); err == nil {
- err := idx.Start()
- if err != nil {
- node.log.Errorf("indexer failed to start, turning it off - %v", err)
- node.config.IsIndexerActive = false
- } else {
- node.log.Info("Indexer was started successfully")
- }
- } else {
- node.log.Infof("Indexer is not available - %v", err)
- }
-
// Set up a context we can use to cancel goroutines on Stop()
node.ctx, node.cancelCtx = context.WithCancel(context.Background())
diff --git a/node/node_test.go b/node/node_test.go
index f58471090..64e285eac 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -30,13 +30,18 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/stateproof"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util"
"github.com/algorand/go-algorand/util/db"
@@ -539,3 +544,54 @@ func TestOfflineOnlineClosedBitStatus(t *testing.T) {
})
}
}
+
+// TestMaxSizesCorrect tests that constants defined in the protocol package are correct
+// and match the MaxSize() values of associated msgp encodable structs.
+// the test is located here since it needs to import various other packages.
+//
+// If this test fails, DO NOT JUST UPDATE THE CONSTANTS OR MODIFY THE TEST!
+// Instead you need to introduce a new version of the protocol and mechanisms
+// to ensure that nodes on different protocol versions don't reject each other's messages due to
+// exceeding the maximum message size for a given network protocol version.
+func TestMaxSizesCorrect(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ /************************************************
+ * ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! *
+ * Read the comment before touching this test! *
+ * ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! *
+ *************************************************
+ */ ////////////////////////////////////////////////
+ avSize := uint64(agreement.UnauthenticatedVoteMaxSize())
+ require.Equal(t, avSize, protocol.AgreementVoteTag.MaxMessageSize())
+ miSize := uint64(network.MessageOfInterestMaxSize())
+ require.Equal(t, miSize, protocol.MsgOfInterestTag.MaxMessageSize())
+ npSize := uint64(NetPrioResponseSignedMaxSize())
+ require.Equal(t, npSize, protocol.NetPrioResponseTag.MaxMessageSize())
+ nsSize := uint64(network.IdentityVerificationMessageSignedMaxSize())
+ require.Equal(t, nsSize, protocol.NetIDVerificationTag.MaxMessageSize())
+ piSize := uint64(network.PingLength)
+ require.Equal(t, piSize, protocol.PingTag.MaxMessageSize())
+ pjSize := uint64(network.PingLength)
+ require.Equal(t, pjSize, protocol.PingReplyTag.MaxMessageSize())
+ ppSize := uint64(agreement.TransmittedPayloadMaxSize())
+ require.Equal(t, ppSize, protocol.ProposalPayloadTag.MaxMessageSize())
+ spSize := uint64(stateproof.SigFromAddrMaxSize())
+ require.Equal(t, spSize, protocol.StateProofSigTag.MaxMessageSize())
+ txSize := uint64(transactions.SignedTxnMaxSize())
+ require.Equal(t, txSize, protocol.TxnTag.MaxMessageSize())
+ msSize := uint64(crypto.DigestMaxSize())
+ require.Equal(t, msSize, protocol.MsgDigestSkipTag.MaxMessageSize())
+
+	// UE is a hand-rolled message not using msgp;
+	// included here for completeness, ensured by protocol.TestMaxSizesTested
+ ueSize := uint64(67)
+ require.Equal(t, ueSize, protocol.UniEnsBlockReqTag.MaxMessageSize())
+
+ // VB and TS are the largest messages and are using the default network max size
+ // including here for completeness ensured by protocol.TestMaxSizesTested
+ vbSize := uint64(network.MaxMessageLength)
+ require.Equal(t, vbSize, protocol.VoteBundleTag.MaxMessageSize())
+ tsSize := uint64(network.MaxMessageLength)
+ require.Equal(t, tsSize, protocol.TopicMsgRespTag.MaxMessageSize())
+}
diff --git a/protocol/codec_tester.go b/protocol/codec_tester.go
index 59a712e55..49cae937b 100644
--- a/protocol/codec_tester.go
+++ b/protocol/codec_tester.go
@@ -163,6 +163,8 @@ func checkBoundsLimitingTag(val reflect.Value, datapath string, structTag string
objType = "slice"
} else if val.Kind() == reflect.Map {
objType = "map"
+ } else if val.Kind() == reflect.String {
+ objType = "string"
}
if structTag != "" {
@@ -240,12 +242,18 @@ func randomizeValue(v reflect.Value, datapath string, tag string, remainingChang
v.SetInt(int64(rand.Uint64()))
*remainingChanges--
case reflect.String:
+ hasAllocBound := checkBoundsLimitingTag(v, datapath, tag)
var buf []byte
var len int
if strings.HasSuffix(v.Type().PkgPath(), "go-algorand/agreement") && v.Type().Name() == "serializableError" {
// Don't generate empty strings for serializableError since nil values of *string type
// will serialize differently by msgp and go-codec
len = rand.Int()%63 + 1
+ } else if strings.HasSuffix(v.Type().PkgPath(), "go-algorand/protocol") && v.Type().Name() == "TxType" {
+ // protocol.TxType has allocbound defined as a custom msgp:allocbound directive so not supported by reflect
+ len = rand.Int()%6 + 1
+ } else if hasAllocBound {
+ len = 1
} else {
len = rand.Int() % 64
}
diff --git a/protocol/consensus.go b/protocol/consensus.go
index 54aa5939d..b32e24566 100644
--- a/protocol/consensus.go
+++ b/protocol/consensus.go
@@ -22,8 +22,14 @@ import (
// ConsensusVersion is a string that identifies a version of the
// consensus protocol.
+//
+//msgp:allocbound ConsensusVersion maxConsensusVersionLen
type ConsensusVersion string
+// maxConsensusVersionLen is used for generating MaxSize functions on types that contain ConsensusVersion
+// as a member. 128 is slightly larger than the existing consensus version URL+hash length of 89.
+const maxConsensusVersionLen = 128
+
// DEPRECATEDConsensusV0 is a baseline version of the Algorand consensus protocol.
// at the time versioning was introduced.
// It is now deprecated.
diff --git a/protocol/hash_test.go b/protocol/hash_test.go
index e490987c1..b3885d18c 100644
--- a/protocol/hash_test.go
+++ b/protocol/hash_test.go
@@ -29,7 +29,7 @@ func TestHashIDPrefix(t *testing.T) {
t.Parallel()
partitiontest.PartitionTest(t)
- values := getConstValues(t, "hash.go", "HashID")
+ values := getConstValues(t, "hash.go", "HashID", false)
for i, v1 := range values {
for j, v2 := range values {
if i == j {
diff --git a/protocol/msgp_gen.go b/protocol/msgp_gen.go
index 9d9a660bb..db191f018 100644
--- a/protocol/msgp_gen.go
+++ b/protocol/msgp_gen.go
@@ -14,6 +14,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> ConsensusVersionMaxSize()
//
// Error
// |-----> MarshalMsg
@@ -22,6 +23,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> ErrorMaxSize()
//
// HashID
// |-----> MarshalMsg
@@ -30,6 +32,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> HashIDMaxSize()
//
// NetworkID
// |-----> MarshalMsg
@@ -38,6 +41,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> NetworkIDMaxSize()
//
// StateProofType
// |-----> MarshalMsg
@@ -46,6 +50,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> StateProofTypeMaxSize()
//
// Tag
// |-----> MarshalMsg
@@ -54,6 +59,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> TagMaxSize()
//
// TxType
// |-----> MarshalMsg
@@ -62,6 +68,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> TxTypeMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -83,6 +90,16 @@ func (_ ConsensusVersion) CanMarshalMsg(z interface{}) bool {
func (z *ConsensusVersion) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
+ var zb0002 int
+ zb0002, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > maxConsensusVersionLen {
+ err = msgp.ErrOverflow(uint64(zb0002), uint64(maxConsensusVersionLen))
+ return
+ }
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -110,6 +127,12 @@ func (z ConsensusVersion) MsgIsZero() bool {
return z == ""
}
+// MaxSize returns a maximum valid message size for this message type
+func ConsensusVersionMaxSize() (s int) {
+ s = msgp.StringPrefixSize + maxConsensusVersionLen
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z Error) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -156,6 +179,12 @@ func (z Error) MsgIsZero() bool {
return z == ""
}
+// MaxSize returns a maximum valid message size for this message type
+func ErrorMaxSize() (s int) {
+ panic("Unable to determine max size: String type string(z) is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z HashID) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -202,6 +231,12 @@ func (z HashID) MsgIsZero() bool {
return z == ""
}
+// MaxSize returns a maximum valid message size for this message type
+func HashIDMaxSize() (s int) {
+ panic("Unable to determine max size: String type string(z) is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z NetworkID) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -248,6 +283,12 @@ func (z NetworkID) MsgIsZero() bool {
return z == ""
}
+// MaxSize returns a maximum valid message size for this message type
+func NetworkIDMaxSize() (s int) {
+ panic("Unable to determine max size: String type string(z) is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z StateProofType) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -294,6 +335,12 @@ func (z StateProofType) MsgIsZero() bool {
return z == 0
}
+// MaxSize returns a maximum valid message size for this message type
+func StateProofTypeMaxSize() (s int) {
+ s = msgp.Uint64Size
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z Tag) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -340,6 +387,12 @@ func (z Tag) MsgIsZero() bool {
return z == ""
}
+// MaxSize returns a maximum valid message size for this message type
+func TagMaxSize() (s int) {
+ panic("Unable to determine max size: String type string(z) is unbounded")
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z TxType) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -359,6 +412,16 @@ func (_ TxType) CanMarshalMsg(z interface{}) bool {
func (z *TxType) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
+ var zb0002 int
+ zb0002, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > txTypeMaxLen {
+ err = msgp.ErrOverflow(uint64(zb0002), uint64(txTypeMaxLen))
+ return
+ }
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -385,3 +448,9 @@ func (z TxType) Msgsize() (s int) {
func (z TxType) MsgIsZero() bool {
return z == ""
}
+
+// MaxSize returns a maximum valid message size for this message type
+func TxTypeMaxSize() (s int) {
+ s = msgp.StringPrefixSize + txTypeMaxLen
+ return
+}
diff --git a/protocol/stateproof.go b/protocol/stateproof.go
index 4e634fc37..1e55ef652 100644
--- a/protocol/stateproof.go
+++ b/protocol/stateproof.go
@@ -35,6 +35,7 @@ const (
// SortStateProofType implements sorting by StateProofType keys for
// canonical encoding of maps in msgpack format.
+//
//msgp:ignore SortStateProofType
//msgp:sort StateProofType SortStateProofType
type SortStateProofType []StateProofType
diff --git a/protocol/tags.go b/protocol/tags.go
index ef44e74ac..249da317e 100644
--- a/protocol/tags.go
+++ b/protocol/tags.go
@@ -20,6 +20,9 @@ package protocol
// e.g., the agreement service can register to handle agreements with the Agreement tag.
type Tag string
+// TagLength specifies the length of protocol tags.
+const TagLength = 2
+
// Tags, in lexicographic sort order of tag values to avoid duplicates.
// These tags must not contain a comma character because lists of tags
// are encoded using a comma separator (see network/msgOfInterest.go).
@@ -43,6 +46,90 @@ const (
VoteBundleTag Tag = "VB"
)
+// The following constants are overestimates in some cases but are reasonable upper bounds
+// for the purposes of limiting the number of bytes read from the network.
+// The calculations to obtain them are defined in node/TestMaxSizesCorrect()
+
+// AgreementVoteTagMaxSize is the maximum size of an AgreementVoteTag message
+const AgreementVoteTagMaxSize = 1228
+
+// MsgOfInterestTagMaxSize is the maximum size of a MsgOfInterestTag message
+const MsgOfInterestTagMaxSize = 45
+
+// MsgDigestSkipTagMaxSize is the maximum size of a MsgDigestSkipTag message
+const MsgDigestSkipTagMaxSize = 69
+
+// NetPrioResponseTagMaxSize is the maximum size of a NetPrioResponseTag message
+const NetPrioResponseTagMaxSize = 838
+
+// NetIDVerificationTagMaxSize is the maximum size of a NetIDVerificationTag message
+const NetIDVerificationTagMaxSize = 215
+
+// PingTagMaxSize is the maximum size of a PingTag message
+const PingTagMaxSize = 8
+
+// PingReplyTagMaxSize is the maximum size of a PingReplyTag message
+const PingReplyTagMaxSize = 8
+
+// ProposalPayloadTagMaxSize is the maximum size of a ProposalPayloadTag message
+// This value is dominated by the MaxTxnBytesPerBlock
+const ProposalPayloadTagMaxSize = 5247980
+
+// StateProofSigTagMaxSize is the maximum size of a StateProofSigTag message
+const StateProofSigTagMaxSize = 6378
+
+// TopicMsgRespTagMaxSize is the maximum size of a TopicMsgRespTag message
+// This is a response to a topic message request (either UE or MI) and the largest possible
+// response is the largest possible block.
+// Matches current network.MaxMessageLength
+const TopicMsgRespTagMaxSize = 6 * 1024 * 1024
+
+// TxnTagMaxSize is the maximum size of a TxnTag message. This is equal to SignedTxnMaxSize()
+// which is size of just a single message containing maximum Stateproof. Since Stateproof
+// transactions can't be batched we don't need to multiply by MaxTxnBatchSize.
+const TxnTagMaxSize = 4620031
+
+// UniEnsBlockReqTagMaxSize is the maximum size of a UniEnsBlockReqTag message
+const UniEnsBlockReqTagMaxSize = 67
+
+// VoteBundleTagMaxSize is the maximum size of a VoteBundleTag message
+// Matches current network.MaxMessageLength
+const VoteBundleTagMaxSize = 6 * 1024 * 1024
+
+// MaxMessageSize returns the maximum size of a message for a given tag
+func (tag Tag) MaxMessageSize() uint64 {
+ switch tag {
+ case AgreementVoteTag:
+ return AgreementVoteTagMaxSize
+ case MsgOfInterestTag:
+ return MsgOfInterestTagMaxSize
+ case MsgDigestSkipTag:
+ return MsgDigestSkipTagMaxSize
+ case NetPrioResponseTag:
+ return NetPrioResponseTagMaxSize
+ case NetIDVerificationTag:
+ return NetIDVerificationTagMaxSize
+ case PingTag:
+ return PingTagMaxSize
+ case PingReplyTag:
+ return PingReplyTagMaxSize
+ case ProposalPayloadTag:
+ return ProposalPayloadTagMaxSize
+ case StateProofSigTag:
+ return StateProofSigTagMaxSize
+ case TopicMsgRespTag:
+ return TopicMsgRespTagMaxSize
+ case TxnTag:
+ return TxnTagMaxSize
+ case UniEnsBlockReqTag:
+ return UniEnsBlockReqTagMaxSize
+ case VoteBundleTag:
+ return VoteBundleTagMaxSize
+ default:
+ return 0 // Unknown tag
+ }
+}
+
// TagList is a list of all currently used protocol tags.
var TagList = []Tag{
AgreementVoteTag,
@@ -59,3 +146,13 @@ var TagList = []Tag{
UniEnsBlockReqTag,
VoteBundleTag,
}
+
+// TagMap is a map of all currently used protocol tags.
+var TagMap map[Tag]struct{}
+
+func init() {
+ TagMap = make(map[Tag]struct{})
+ for _, tag := range TagList {
+ TagMap[tag] = struct{}{}
+ }
+}
diff --git a/protocol/tags_test.go b/protocol/tags_test.go
index 087e32a82..7cf0bea93 100644
--- a/protocol/tags_test.go
+++ b/protocol/tags_test.go
@@ -17,6 +17,7 @@
package protocol
import (
+ "fmt"
"go/ast"
"go/parser"
"go/token"
@@ -29,7 +30,8 @@ import (
// getConstValues uses the AST to get a list of the values of declared const
// variables of the provided typeName in a specified fileName.
-func getConstValues(t *testing.T, fileName string, typeName string) []string {
+// if namesOnly is true, it returns the names of the const variables instead.
+func getConstValues(t *testing.T, fileName string, typeName string, namesOnly bool) []string {
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, fileName, nil, 0)
require.NoError(t, err)
@@ -54,6 +56,11 @@ func getConstValues(t *testing.T, fileName string, typeName string) []string {
if v.Type == nil || v.Type.(*ast.Ident).Name != typeName {
continue
}
+
+ if namesOnly {
+ ret = append(ret, v.Names[0].Name)
+ continue
+ }
// Iterate through the expressions in the value spec
for _, expr := range v.Values {
val, ok := expr.(*ast.BasicLit)
@@ -77,7 +84,7 @@ func TestTagList(t *testing.T) {
t.Parallel()
partitiontest.PartitionTest(t)
- constTags := getConstValues(t, "tags.go", "Tag")
+ constTags := getConstValues(t, "tags.go", "Tag", false)
// Verify that TagList is not empty and has the same length as constTags
require.NotEmpty(t, TagList)
@@ -96,3 +103,158 @@ func TestTagList(t *testing.T) {
}
require.Empty(t, tagListMap, "Unseen tags remain in TagList")
}
+
+func TestMaxSizesDefined(t *testing.T) {
+ t.Parallel()
+ partitiontest.PartitionTest(t)
+ // Verify that we have a nonzero max message size for each tag in the TagList
+ for _, tag := range TagList {
+ require.Greater(t, tag.MaxMessageSize(), uint64(0))
+ }
+}
+
+// TestMaxSizesTested checks that each Tag in the TagList has a corresponding line in the TestMaxSizesCorrect test in node_test.go
+func TestMaxSizesTested(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ constTags := getConstValues(t, "tags.go", "Tag", true)
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "../node/node_test.go", nil, 0)
+ require.NoError(t, err)
+ // Iterate through the declarations in the file
+
+ tagsFound := make(map[string]bool)
+ for _, d := range f.Decls {
+ gen, ok := d.(*ast.FuncDecl)
+ // Check if the declaration is a Function Declaration and if it is the TestMaxMessageSize function
+ if !ok || gen.Name.Name != "TestMaxSizesCorrect" {
+ continue
+ }
+ // Iterate through stmt in the function
+ for _, stmt := range gen.Body.List {
+ // Check if the spec is a value spec
+ _ = stmt
+ switch stmt := stmt.(type) {
+ case *ast.ExprStmt:
+ expr, ok := stmt.X.(*ast.CallExpr)
+ if !ok {
+ continue
+ }
+ sel, ok := expr.Fun.(*ast.SelectorExpr)
+ if !ok || fmt.Sprintf("%s.%s", sel.X, sel.Sel.Name) != "require.Equal" {
+ continue
+ }
+ // we are in the require.Equal function call and need to check the third argument
+ call, ok := expr.Args[2].(*ast.CallExpr)
+ if !ok {
+ continue
+ }
+ tagSel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok || tagSel.Sel.Name != "MaxMessageSize" {
+ continue
+ }
+ tagSel, ok = tagSel.X.(*ast.SelectorExpr)
+ if !ok || fmt.Sprintf("%s", tagSel.X) != "protocol" {
+ continue
+ }
+ // We have found the tag name on which MaxMessageSize() is called and used in require.Equal
+ // add it to the map
+ tagsFound[tagSel.Sel.Name] = true
+ default:
+ continue
+ }
+ }
+ }
+
+ for _, tag := range constTags {
+ require.Truef(t, tagsFound[tag], "Tag %s does not have a corresponding test in TestMaxSizesCorrect", tag)
+ }
+}
+
+// Switch vs Map justification
+// BenchmarkTagsMaxMessageSizeSwitch-8 11358924 104.0 ns/op
+// BenchmarkTagsMaxMessageSizeMap-8 10242530 117.4 ns/op
+func BenchmarkTagsMaxMessageSizeSwitch(b *testing.B) {
+ // warmup like the Map benchmark below
+ tagsmap := make(map[Tag]uint64, len(TagList))
+ for _, tag := range TagList {
+ tagsmap[tag] = tag.MaxMessageSize()
+ }
+
+ b.ResetTimer()
+
+ var total uint64
+ for i := 0; i < b.N; i++ {
+ for _, tag := range TagList {
+ total += tag.MaxMessageSize()
+ }
+ }
+ require.Greater(b, total, uint64(0))
+}
+
+func BenchmarkTagsMaxMessageSizeMap(b *testing.B) {
+ tagsmap := make(map[Tag]uint64, len(TagList))
+ for _, tag := range TagList {
+ tagsmap[tag] = tag.MaxMessageSize()
+ }
+
+ b.ResetTimer()
+ var total uint64
+ for i := 0; i < b.N; i++ {
+ for _, tag := range TagList {
+ total += tagsmap[tag]
+ }
+ }
+ require.Greater(b, total, uint64(0))
+}
+
+// TestLockdownTagList locks down the list of tags in the code.
+//
+// The node will drop the connection when the connecting node requests
+// a message of interest which is not in this list. This is a backward
+// compatibility problem. When a new tag is introduced, the nodes with
+// older version will not connect to the nodes running the new
+// version.
+//
+// It is necessary to check the version of the other node before
+// sending a request for a newly added tag. Currently, version
+// checking is not implemented.
+//
+// Similarly, When removing a tag, it is important to support requests
+// for the removed tag from nodes running an older version.
+func TestLockdownTagList(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ /************************************************
+ * ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! *
+ * Read the comment before touching this test! *
+ * ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! *
+ *************************************************
+ */ ////////////////////////////////////////////////
+ var tagList = []Tag{
+ AgreementVoteTag,
+ MsgOfInterestTag,
+ MsgDigestSkipTag,
+ NetIDVerificationTag,
+ NetPrioResponseTag,
+ PingTag,
+ PingReplyTag,
+ ProposalPayloadTag,
+ StateProofSigTag,
+ TopicMsgRespTag,
+ TxnTag,
+ UniEnsBlockReqTag,
+ VoteBundleTag,
+ }
+ require.Equal(t, len(tagList), len(TagList))
+ tagMap := make(map[Tag]bool)
+ for _, tag := range tagList {
+ tagMap[tag] = true
+ _, has := TagMap[tag]
+ require.True(t, has)
+ }
+ for _, tag := range TagList {
+ require.True(t, tagMap[tag])
+ }
+}
diff --git a/protocol/test/msgp_gen.go b/protocol/test/msgp_gen.go
index ac8ec8b74..518afb1cb 100644
--- a/protocol/test/msgp_gen.go
+++ b/protocol/test/msgp_gen.go
@@ -14,6 +14,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> Msgsize
// |-----> MsgIsZero
+// |-----> TestSliceMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -85,3 +86,10 @@ func (z testSlice) Msgsize() (s int) {
func (z testSlice) MsgIsZero() bool {
return len(z) == 0
}
+
+// MaxSize returns a maximum valid message size for this message type
+func TestSliceMaxSize() (s int) {
+ // Calculating size of slice: z
+ s += msgp.ArrayHeaderSize + ((16) * (msgp.Uint64Size))
+ return
+}
diff --git a/protocol/txntype.go b/protocol/txntype.go
index d0cfe058b..0728898f1 100644
--- a/protocol/txntype.go
+++ b/protocol/txntype.go
@@ -22,6 +22,9 @@ package protocol
// TxType is the type of the transaction written to the ledger
type TxType string
+//msgp:allocbound TxType txTypeMaxLen
+const txTypeMaxLen = 7
+
const (
// PaymentTx indicates a payment transaction
PaymentTx TxType = "pay"
diff --git a/rpcs/blockService.go b/rpcs/blockService.go
index f668a1c52..0f48f873c 100644
--- a/rpcs/blockService.go
+++ b/rpcs/blockService.go
@@ -20,11 +20,13 @@ import (
"context"
"encoding/binary"
"errors"
+ "fmt"
"net/http"
"path"
"strconv"
"strings"
"sync"
+ "sync/atomic"
"github.com/gorilla/mux"
@@ -41,12 +43,14 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/metrics"
)
// BlockResponseContentType is the HTTP Content-Type header for a raw binary block
const BlockResponseContentType = "application/x-algorand-block-v1"
const blockResponseHasBlockCacheControl = "public, max-age=31536000, immutable" // 31536000 seconds are one year.
const blockResponseMissingBlockCacheControl = "public, max-age=1, must-revalidate" // cache for 1 second, and force revalidation afterward
+const blockResponseRetryAfter = "3" // retry after 3 seconds
const blockServerMaxBodyLength = 512 // we don't really pass meaningful content here, so 512 bytes should be a safe limit
const blockServerCatchupRequestBufferSize = 10
@@ -65,6 +69,21 @@ const (
var errBlockServiceClosed = errors.New("block service is shutting down")
+const errMemoryAtCapacityPublic = "block service memory over capacity"
+
+type errMemoryAtCapacity struct{ capacity, used uint64 }
+
+func (err errMemoryAtCapacity) Error() string {
+ return fmt.Sprintf("block service memory over capacity: %d / %d", err.used, err.capacity)
+}
+
+var wsBlockMessagesDroppedCounter = metrics.MakeCounter(
+ metrics.MetricName{Name: "algod_rpcs_ws_reqs_dropped", Description: "Number of websocket block requests dropped due to memory capacity"},
+)
+var httpBlockMessagesDroppedCounter = metrics.MakeCounter(
+ metrics.MetricName{Name: "algod_rpcs_http_reqs_dropped", Description: "Number of http block requests dropped due to memory capacity"},
+)
+
// LedgerForBlockService describes the Ledger methods used by BlockService.
type LedgerForBlockService interface {
EncodedBlockCert(rnd basics.Round) (blk []byte, cert []byte, err error)
@@ -84,6 +103,9 @@ type BlockService struct {
log logging.Logger
closeWaitGroup sync.WaitGroup
mu deadlock.Mutex
+ memoryUsed uint64
+ wsMemoryUsed uint64
+ memoryCap uint64
}
// EncodedBlockCert defines how GetBlockBytes encodes a block and its certificate
@@ -96,6 +118,7 @@ type EncodedBlockCert struct {
// PreEncodedBlockCert defines how GetBlockBytes encodes a block and its certificate,
// using a pre-encoded Block and Certificate in msgpack format.
+//
//msgp:ignore PreEncodedBlockCert
type PreEncodedBlockCert struct {
Block codec.Raw `codec:"block"`
@@ -119,6 +142,7 @@ func MakeBlockService(log logging.Logger, config config.Local, ledger LedgerForB
fallbackEndpoints: makeFallbackEndpoints(log, config.BlockServiceCustomFallbackEndpoints),
enableArchiverFallback: config.EnableBlockServiceFallbackToArchiver,
log: log,
+ memoryCap: config.BlockServiceMemCap,
}
if service.enableService {
net.RegisterHTTPHandler(BlockServiceBlockPath, service)
@@ -224,6 +248,16 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re
response.WriteHeader(http.StatusNotFound)
}
return
+ case errMemoryAtCapacity:
+ // memory used by HTTP block requests is over the cap
+ ok := bs.redirectRequest(round, response, request)
+ if !ok {
+ response.Header().Set("Retry-After", blockResponseRetryAfter)
+ response.WriteHeader(http.StatusServiceUnavailable)
+ bs.log.Debugf("ServeHTTP: returned retry-after: %v", err)
+ }
+ httpBlockMessagesDroppedCounter.Inc(nil)
+ return
default:
// unexpected error.
bs.log.Warnf("ServeHTTP : failed to retrieve block %d %v", round, err)
@@ -240,6 +274,9 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re
if err != nil {
bs.log.Warn("http block write failed ", err)
}
+ bs.mu.Lock()
+ defer bs.mu.Unlock()
+ bs.memoryUsed = bs.memoryUsed - uint64(len(encodedBlockCert))
}
func (bs *BlockService) processIncomingMessage(msg network.IncomingMessage) (n network.OutgoingMessage) {
@@ -277,11 +314,35 @@ const datatypeUnsupportedErrMsg = "requested data type is unsupported"
func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.IncomingMessage) {
target := reqMsg.Sender.(network.UnicastPeer)
var respTopics network.Topics
+ var n uint64
defer func() {
- target.Respond(ctx, reqMsg, respTopics)
+ outMsg := network.OutgoingMessage{Topics: respTopics}
+ if n > 0 {
+ outMsg.OnRelease = func() {
+ atomic.AddUint64(&bs.wsMemoryUsed, ^uint64(n-1))
+ }
+ atomic.AddUint64(&bs.wsMemoryUsed, (n))
+ }
+ err := target.Respond(ctx, reqMsg, outMsg)
+ if err != nil {
+ bs.log.Warnf("BlockService handleCatchupReq: failed to respond: %s", err)
+ }
}()
+ // If we are over-capacity, we will not process the request
+ // respond to sender with error message
+ memUsed := atomic.LoadUint64(&bs.wsMemoryUsed)
+ if memUsed > bs.memoryCap {
+ err := errMemoryAtCapacity{capacity: bs.memoryCap, used: memUsed}
+ bs.log.Infof("BlockService handleCatchupReq: %s", err.Error())
+ respTopics = network.Topics{
+ network.MakeTopic(network.ErrorKey, []byte(errMemoryAtCapacityPublic)),
+ }
+ wsBlockMessagesDroppedCounter.Inc(nil)
+ return
+ }
+
topics, err := network.UnmarshallTopics(reqMsg.Data)
if err != nil {
bs.log.Infof("BlockService handleCatchupReq: %s", err.Error())
@@ -314,7 +375,7 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc
[]byte(roundNumberParseErrMsg))}
return
}
- respTopics = topicBlockBytes(bs.log, bs.ledger, basics.Round(round), string(requestType))
+ respTopics, n = topicBlockBytes(bs.log, bs.ledger, basics.Round(round), string(requestType))
return
}
@@ -382,10 +443,17 @@ func (bs *BlockService) rawBlockBytes(round basics.Round) ([]byte, error) {
}
default:
}
- return RawBlockBytes(bs.ledger, round)
+ if bs.memoryUsed > bs.memoryCap {
+ return nil, errMemoryAtCapacity{used: bs.memoryUsed, capacity: bs.memoryCap}
+ }
+ data, err := RawBlockBytes(bs.ledger, round)
+ if err == nil {
+ bs.memoryUsed = bs.memoryUsed + uint64(len(data))
+ }
+ return data, err
}
-func topicBlockBytes(log logging.Logger, dataLedger LedgerForBlockService, round basics.Round, requestType string) network.Topics {
+func topicBlockBytes(log logging.Logger, dataLedger LedgerForBlockService, round basics.Round, requestType string) (network.Topics, uint64) {
blk, cert, err := dataLedger.EncodedBlockCert(round)
if err != nil {
switch err.(type) {
@@ -394,7 +462,7 @@ func topicBlockBytes(log logging.Logger, dataLedger LedgerForBlockService, round
log.Infof("BlockService topicBlockBytes: %s", err)
}
return network.Topics{
- network.MakeTopic(network.ErrorKey, []byte(blockNotAvailableErrMsg))}
+ network.MakeTopic(network.ErrorKey, []byte(blockNotAvailableErrMsg))}, 0
}
switch requestType {
case BlockAndCertValue:
@@ -403,10 +471,10 @@ func topicBlockBytes(log logging.Logger, dataLedger LedgerForBlockService, round
BlockDataKey, blk),
network.MakeTopic(
CertDataKey, cert),
- }
+ }, uint64(len(blk) + len(cert))
default:
return network.Topics{
- network.MakeTopic(network.ErrorKey, []byte(datatypeUnsupportedErrMsg))}
+ network.MakeTopic(network.ErrorKey, []byte(datatypeUnsupportedErrMsg))}, 0
}
}
diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go
index fa934feeb..6b275a248 100644
--- a/rpcs/blockService_test.go
+++ b/rpcs/blockService_test.go
@@ -17,11 +17,14 @@
package rpcs
import (
+ "bytes"
"context"
+ "encoding/binary"
"fmt"
"io"
"net/http"
"strings"
+ "sync"
"testing"
"time"
@@ -41,6 +44,7 @@ import (
type mockUnicastPeer struct {
responseTopics network.Topics
+ outMsg network.OutgoingMessage
}
func (mup *mockUnicastPeer) GetAddress() string {
@@ -60,8 +64,9 @@ func (mup *mockUnicastPeer) GetConnectionLatency() time.Duration {
func (mup *mockUnicastPeer) Request(ctx context.Context, tag network.Tag, topics network.Topics) (resp *network.Response, e error) {
return nil, nil
}
-func (mup *mockUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMessage, topics network.Topics) (e error) {
- mup.responseTopics = topics
+func (mup *mockUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMessage, outMsg network.OutgoingMessage) (e error) {
+ mup.responseTopics = outMsg.Topics
+ mup.outMsg = outMsg
return nil
}
@@ -120,7 +125,7 @@ func TestHandleCatchupReqNegative(t *testing.T) {
require.Equal(t, roundNumberParseErrMsg, string(val))
}
-// TestRedirectFallbackArchiver tests the case when the block service fallback to another in the absense of a given block.
+// TestRedirectFallbackArchiver tests the case when the block service fallback to another in the absence of a given block.
func TestRedirectFallbackArchiver(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -279,6 +284,194 @@ func TestRedirectFallbackEndpoints(t *testing.T) {
require.Equal(t, http.StatusOK, response.StatusCode)
}
+// TestRedirectOnFullCapacity tests the case when the block service
+// falls back to another because its memory use is at capacity
+func TestRedirectOnFullCapacity(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ log1 := logging.TestingLog(t)
+ logBuffer1 := bytes.NewBuffer(nil)
+ log1.SetOutput(logBuffer1)
+
+ log2 := logging.TestingLog(t)
+ logBuffer2 := bytes.NewBuffer(nil)
+ log2.SetOutput(logBuffer2)
+
+ ledger1 := makeLedger(t, "l1")
+ defer ledger1.Close()
+ ledger2 := makeLedger(t, "l2")
+ defer ledger2.Close()
+ addBlock(t, ledger1)
+ l1Block2Ts := addBlock(t, ledger1)
+ addBlock(t, ledger2)
+ l2Block2Ts := addBlock(t, ledger2)
+ require.NotEqual(t, l1Block2Ts, l2Block2Ts)
+
+ net1 := &httpTestPeerSource{}
+ net2 := &httpTestPeerSource{}
+
+ config := config.GetDefaultLocal()
+ bs1 := MakeBlockService(log1, config, ledger1, net1, "test-genesis-ID")
+ bs2 := MakeBlockService(log2, config, ledger2, net2, "test-genesis-ID")
+ // set the memory cap so that it can serve only 1 block at a time
+ bs1.memoryCap = 250
+ bs2.memoryCap = 250
+
+ nodeA := &basicRPCNode{}
+ nodeB := &basicRPCNode{}
+
+ nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1)
+ nodeA.start()
+ defer nodeA.stop()
+
+ nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2)
+ nodeB.start()
+ defer nodeB.stop()
+
+ net1.addPeer(nodeB.rootURL())
+
+ parsedURL, err := network.ParseHostOrURL(nodeA.rootURL())
+ require.NoError(t, err)
+
+ client := http.Client{}
+
+ parsedURL.Path = FormatBlockQuery(uint64(2), parsedURL.Path, net1)
+ parsedURL.Path = strings.Replace(parsedURL.Path, "{genesisID}", "test-genesis-ID", 1)
+ blockURL := parsedURL.String()
+ request, err := http.NewRequest("GET", blockURL, nil)
+ require.NoError(t, err)
+ network.SetUserAgentHeader(request.Header)
+
+ var responses1, responses2, responses3, responses4 *http.Response
+ var blk bookkeeping.Block
+ var l2Failed bool
+ xDone := 1000
+ // Keep on sending 4 simultaneous requests to the first node, to force it to redirect to node 2
+ // then check the timestamp from the block header to confirm the redirection took place
+ var x int
+forloop:
+ for ; x < xDone; x++ {
+ wg := sync.WaitGroup{}
+ wg.Add(4)
+ go func() {
+ defer wg.Done()
+ responses1, _ = client.Do(request)
+ }()
+ go func() {
+ defer wg.Done()
+ responses2, _ = client.Do(request)
+ }()
+ go func() {
+ defer wg.Done()
+ responses3, _ = client.Do(request)
+ }()
+ go func() {
+ defer wg.Done()
+ responses4, _ = client.Do(request)
+ }()
+
+ wg.Wait()
+ responses := [4]*http.Response{responses1, responses2, responses3, responses4}
+ for p := 0; p < 4; p++ {
+ if responses[p] == nil {
+ continue
+ }
+ if responses[p].StatusCode == http.StatusServiceUnavailable {
+ l2Failed = true
+ require.Equal(t, "3", responses[p].Header["Retry-After"][0])
+ continue
+ }
+ // parse the block to get the header timestamp
+ // timestamp is needed to know which node served the block
+ require.Equal(t, http.StatusOK, responses[p].StatusCode)
+ bodyData, err := io.ReadAll(responses[p].Body)
+ require.NoError(t, err)
+ require.NotEqual(t, 0, len(bodyData))
+ var blkCert PreEncodedBlockCert
+ err = protocol.DecodeReflect(bodyData, &blkCert)
+ require.NoError(t, err)
+ err = protocol.Decode(blkCert.Block, &blk)
+ require.NoError(t, err)
+ if blk.TimeStamp == l2Block2Ts && l2Failed {
+ break forloop
+ }
+ }
+ }
+ require.Less(t, x, xDone)
+ // check if redirection happened
+ require.Equal(t, blk.TimeStamp, l2Block2Ts)
+ // check if node 2 was also overwhelmed and responded with retry-after, since it cannot redirect
+ require.True(t, l2Failed)
+
+ // First node redirects, does not return retry
+ require.True(t, strings.Contains(logBuffer1.String(), "redirectRequest: redirected block request to"))
+ require.False(t, strings.Contains(logBuffer1.String(), "ServeHTTP: returned retry-after: block service memory over capacity"))
+
+ // Second node cannot redirect, it returns retry-after when over capacity
+ require.False(t, strings.Contains(logBuffer2.String(), "redirectRequest: redirected block request to"))
+ require.True(t, strings.Contains(logBuffer2.String(), "ServeHTTP: returned retry-after: block service memory over capacity"))
+}
+
+// TestWsBlockLimiting ensures that limits are applied correctly on the websocket side of the service
+func TestWsBlockLimiting(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ log := logging.TestingLog(t)
+ logBuffer := bytes.NewBuffer(nil)
+ log.SetOutput(logBuffer)
+
+ ledger := makeLedger(t, "l1")
+ defer ledger.Close()
+ addBlock(t, ledger)
+ addBlock(t, ledger)
+
+ net1 := &httpTestPeerSource{}
+
+ config := config.GetDefaultLocal()
+ bs1 := MakeBlockService(log, config, ledger, net1, "test-genesis-ID")
+ // set the memory cap so that it can serve only 1 block at a time
+ bs1.memoryCap = 250
+
+ peer := mockUnicastPeer{}
+ reqMsg := network.IncomingMessage{
+ Sender: &peer,
+ Tag: protocol.Tag("UE"),
+ }
+ roundBin := make([]byte, binary.MaxVarintLen64)
+ binary.PutUvarint(roundBin, uint64(2))
+ topics := network.Topics{
+ network.MakeTopic(RequestDataTypeKey,
+ []byte(BlockAndCertValue)),
+ network.MakeTopic(
+ RoundKey,
+ roundBin),
+ }
+ reqMsg.Data = topics.MarshallTopics()
+ require.Zero(t, bs1.wsMemoryUsed)
+ bs1.handleCatchupReq(context.Background(), reqMsg)
+ // We should have received the message into the mock peer and the block service should have wsMemoryUsed > 0
+ data, found := peer.responseTopics.GetValue(BlockDataKey)
+ require.True(t, found)
+ blk, _, err := ledger.EncodedBlockCert(basics.Round(2))
+ require.NoError(t, err)
+ require.Equal(t, data, blk)
+ require.Positive(t, bs1.wsMemoryUsed)
+
+ // Before making a new request save the callback since the new failed message will overwrite it in the mock peer
+ callback := peer.outMsg.OnRelease
+
+ // Now we should be over the max and the block service should not return a block
+ // and should return an error instead
+ bs1.handleCatchupReq(context.Background(), reqMsg)
+ _, found = peer.responseTopics.GetValue(network.ErrorKey)
+ require.True(t, found)
+
+ // Now call the callback to free up memUsed
+ require.Nil(t, peer.outMsg.OnRelease)
+ callback()
+ require.Zero(t, bs1.wsMemoryUsed)
+}
+
// TestRedirectExceptions tests exception cases:
// - the case when the peer is not a valid http peer
// - the case when the block service keeps redirecting and cannot get a block
@@ -358,11 +551,11 @@ func makeLedger(t *testing.T, namePostfix string) *data.Ledger {
return ledger
}
-func addBlock(t *testing.T, ledger *data.Ledger) {
+func addBlock(t *testing.T, ledger *data.Ledger) (timestamp int64) {
blk, err := ledger.Block(ledger.LastRound())
require.NoError(t, err)
blk.BlockHeader.Round++
- blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100000 * 1000)
blk.TxnCommitments, err = blk.PaysetCommit()
require.NoError(t, err)
@@ -375,4 +568,13 @@ func addBlock(t *testing.T, ledger *data.Ledger) {
hdr, err := ledger.BlockHdr(blk.BlockHeader.Round)
require.NoError(t, err)
require.Equal(t, blk.BlockHeader, hdr)
+ return blk.BlockHeader.TimeStamp
+}
+
+func TestErrMemoryAtCapacity(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ macError := errMemoryAtCapacity{capacity: uint64(100), used: uint64(110)}
+ errStr := macError.Error()
+ require.Equal(t, "block service memory over capacity: 110 / 100", errStr)
}
diff --git a/rpcs/ledgerService.go b/rpcs/ledgerService.go
index b7af358a3..8abf87e3b 100644
--- a/rpcs/ledgerService.go
+++ b/rpcs/ledgerService.go
@@ -30,8 +30,8 @@ import (
"github.com/gorilla/mux"
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
@@ -54,11 +54,17 @@ const (
expectedWorstUploadSpeedBytesPerSecond = 20 * 1024
)
+// LedgerForService defines the ledger interface required for the LedgerService
+type LedgerForService interface {
+ // GetCatchpointStream returns the ReadCloseSizer for a requested catchpoint round
+ GetCatchpointStream(round basics.Round) (ledger.ReadCloseSizer, error)
+}
+
// LedgerService represents the Ledger RPC API
type LedgerService struct {
// running is non-zero once the service is running, and zero when it's not running. it needs to be at a 32-bit aligned address for RasPI support.
running int32
- ledger *data.Ledger
+ ledger LedgerForService
genesisID string
net network.GossipNode
enableService bool
@@ -66,7 +72,7 @@ type LedgerService struct {
}
// MakeLedgerService creates a LedgerService around the provider Ledger and registers it with the HTTP router
-func MakeLedgerService(config config.Local, ledger *data.Ledger, net network.GossipNode, genesisID string) *LedgerService {
+func MakeLedgerService(config config.Local, ledger LedgerForService, net network.GossipNode, genesisID string) *LedgerService {
service := &LedgerService{
ledger: ledger,
genesisID: genesisID,
@@ -111,7 +117,7 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R
genesisID, hasGenesisID := pathVars["genesisID"]
if hasVersionStr {
if versionStr != "1" {
- logging.Base().Debugf("http ledger bad version '%s'", versionStr)
+ logging.Base().Debugf("LedgerService.ServeHTTP: bad version '%s'", versionStr)
response.WriteHeader(http.StatusBadRequest)
response.Write([]byte(fmt.Sprintf("unsupported version '%s'", versionStr)))
return
@@ -119,13 +125,13 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R
}
if hasGenesisID {
if ls.genesisID != genesisID {
- logging.Base().Debugf("http ledger bad genesisID mine=%#v theirs=%#v", ls.genesisID, genesisID)
+ logging.Base().Debugf("LedgerService.ServeHTTP: bad genesisID mine=%#v theirs=%#v", ls.genesisID, genesisID)
response.WriteHeader(http.StatusBadRequest)
response.Write([]byte(fmt.Sprintf("mismatching genesisID '%s'", genesisID)))
return
}
} else {
- logging.Base().Debug("http ledger no genesisID")
+ logging.Base().Debug("LedgerService.ServeHTTP: no genesisID")
response.WriteHeader(http.StatusBadRequest)
response.Write([]byte("missing genesisID"))
return
@@ -135,14 +141,14 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R
request.Body = http.MaxBytesReader(response, request.Body, ledgerServerMaxBodyLength)
err := request.ParseForm()
if err != nil {
- logging.Base().Debugf("http ledger parse form err : %v", err)
+ logging.Base().Debugf("LedgerService.ServeHTTP: parse form err : %v", err)
response.WriteHeader(http.StatusBadRequest)
response.Write([]byte(fmt.Sprintf("unable to parse form body : %v", err)))
return
}
roundStrs, ok := request.Form["r"]
if !ok || len(roundStrs) != 1 {
- logging.Base().Debugf("http ledger bad round number form arg '%s'", roundStrs)
+ logging.Base().Debugf("LedgerService.ServeHTTP: bad round number form arg '%s'", roundStrs)
response.WriteHeader(http.StatusBadRequest)
response.Write([]byte("invalid round number specified in 'r' form argument"))
return
@@ -152,13 +158,13 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R
if ok {
if len(versionStrs) == 1 {
if versionStrs[0] != "1" {
- logging.Base().Debugf("http ledger bad version '%s'", versionStr)
+ logging.Base().Debugf("LedgerService.ServeHTTP: bad version '%s'", versionStr)
response.WriteHeader(http.StatusBadRequest)
response.Write([]byte(fmt.Sprintf("unsupported version specified '%s'", versionStrs[0])))
return
}
} else {
- logging.Base().Debugf("http ledger wrong number of v=%d args", len(versionStrs))
+ logging.Base().Debugf("LedgerService.ServeHTTP: wrong number of v=%d args", len(versionStrs))
response.WriteHeader(http.StatusBadRequest)
response.Write([]byte(fmt.Sprintf("invalid number of version specified %d", len(versionStrs))))
return
@@ -167,11 +173,13 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R
}
round, err := strconv.ParseUint(roundStr, 36, 64)
if err != nil {
- logging.Base().Debugf("http ledger round parse fail ('%s'): %v", roundStr, err)
+ logging.Base().Debugf("LedgerService.ServeHTTP: round parse fail ('%s'): %v", roundStr, err)
response.WriteHeader(http.StatusBadRequest)
response.Write([]byte(fmt.Sprintf("specified round number could not be parsed using base 36 : %v", err)))
return
}
+ logging.Base().Infof("LedgerService.ServeHTTP: serving catchpoint round %d", round)
+ start := time.Now()
cs, err := ls.ledger.GetCatchpointStream(basics.Round(round))
if err != nil {
switch err.(type) {
@@ -182,13 +190,18 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R
return
default:
// unexpected error.
- logging.Base().Warnf("ServeHTTP : failed to retrieve catchpoint %d %v", round, err)
+ logging.Base().Warnf("LedgerService.ServeHTTP : failed to retrieve catchpoint %d %v", round, err)
response.WriteHeader(http.StatusInternalServerError)
response.Write([]byte(fmt.Sprintf("catchpoint file for round %d could not be retrieved due to internal error : %v", round, err)))
return
}
}
defer cs.Close()
+ response.Header().Set("Content-Type", LedgerResponseContentType)
+ if request.Method == http.MethodHead {
+ response.WriteHeader(http.StatusOK)
+ return
+ }
if conn := ls.net.GetHTTPRequestConnection(request); conn != nil {
maxCatchpointFileWritingDuration := 2 * time.Minute
@@ -203,7 +216,6 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R
logging.Base().Warnf("LedgerService.ServeHTTP unable to set connection timeout")
}
- response.Header().Set("Content-Type", LedgerResponseContentType)
requestedCompressedResponse := strings.Contains(request.Header.Get("Accept-Encoding"), "gzip")
if requestedCompressedResponse {
response.Header().Set("Content-Encoding", "gzip")
@@ -211,6 +223,8 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R
if err != nil {
logging.Base().Infof("LedgerService.ServeHTTP : unable to write compressed catchpoint file for round %d, written bytes %d : %v", round, written, err)
}
+ elapsed := time.Since(start)
+ logging.Base().Infof("LedgerService.ServeHTTP: served catchpoint round %d in %d sec", round, int(elapsed.Seconds()))
return
}
decompressedGzip, err := gzip.NewReader(cs)
@@ -224,5 +238,8 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R
written, err := io.Copy(response, decompressedGzip)
if err != nil {
logging.Base().Infof("LedgerService.ServeHTTP : unable to write decompressed catchpoint file for round %d, written bytes %d : %v", round, written, err)
+ } else {
+ elapsed := time.Since(start)
+ logging.Base().Infof("LedgerService.ServeHTTP: served catchpoint round %d in %d sec", round, int(elapsed.Seconds()))
}
}
diff --git a/rpcs/ledgerService_test.go b/rpcs/ledgerService_test.go
new file mode 100644
index 000000000..6b01cf0e1
--- /dev/null
+++ b/rpcs/ledgerService_test.go
@@ -0,0 +1,174 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package rpcs
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "testing"
+
+ "github.com/gorilla/mux"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/network"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+type fakeNetwork struct {
+ network.GossipNode
+ router *mux.Router
+ *mock.Mock
+}
+
+func (fnet *fakeNetwork) RegisterHTTPHandler(path string, handler http.Handler) {
+ fnet.router.Handle(path, handler)
+ fnet.Called(path, handler)
+}
+
+type fakeLedger struct {
+ *mock.Mock
+}
+
+func (fledger *fakeLedger) GetCatchpointStream(round basics.Round) (ledger.ReadCloseSizer, error) {
+ args := fledger.Called(round)
+ return args.Get(0).(ledger.ReadCloseSizer), args.Error(1)
+}
+
+type readCloseSizer struct {
+ io.ReadCloser
+ *mock.Mock
+}
+
+func (r readCloseSizer) Size() (int64, error) {
+ args := r.Called()
+ return int64(args.Int(0)), args.Error(1)
+}
+
+func (r readCloseSizer) Close() error {
+ return nil
+}
+
+func TestLedgerService(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ genesisID := "testGenesisID"
+ cfg := config.GetDefaultLocal()
+ l := fakeLedger{Mock: &mock.Mock{}}
+ fnet := fakeNetwork{router: mux.NewRouter(), Mock: &mock.Mock{}}
+
+ // Test LedgerService not enabled
+ cfg.EnableLedgerService = false
+ ledgerService := MakeLedgerService(cfg, &l, &fnet, genesisID)
+ fnet.AssertNotCalled(t, "RegisterHTTPHandler", LedgerServiceLedgerPath, ledgerService)
+ ledgerService.Start()
+ require.Equal(t, int32(0), ledgerService.running)
+
+ // Test GET 404
+ rr := httptest.NewRecorder()
+ req, err := http.NewRequest("GET", "/", nil)
+ require.NoError(t, err)
+ ledgerService.ServeHTTP(rr, req)
+ require.Equal(t, http.StatusNotFound, rr.Code)
+
+ // Test LedgerService enabled
+ cfg.EnableLedgerService = true
+ fnet.On("RegisterHTTPHandler", LedgerServiceLedgerPath, mock.Anything).Return()
+ ledgerService = MakeLedgerService(cfg, &l, &fnet, genesisID)
+ fnet.AssertCalled(t, "RegisterHTTPHandler", LedgerServiceLedgerPath, ledgerService)
+ ledgerService.Start()
+ require.Equal(t, int32(1), ledgerService.running)
+
+ // Test GET 400 Bad Version String
+ rr = httptest.NewRecorder()
+ req, err = http.NewRequest("GET", "/v2/foobar/ledger/23", nil)
+ require.NoError(t, err)
+ fnet.router.ServeHTTP(rr, req)
+ require.Equal(t, http.StatusBadRequest, rr.Code)
+ require.Contains(t, rr.Body.String(), "unsupported version '2'")
+
+ // Test Get 400 Bad Genesis ID
+ rr = httptest.NewRecorder()
+ req, err = http.NewRequest("GET", "/v1/foobar/ledger/23", nil)
+ require.NoError(t, err)
+ fnet.router.ServeHTTP(rr, req)
+ require.Equal(t, http.StatusBadRequest, rr.Code)
+ require.Contains(t, rr.Body.String(), "mismatching genesisID 'foobar'")
+
+ // Test Get 400 No Path Vars
+ rr = httptest.NewRecorder()
+ req, err = http.NewRequest("GET", "", nil)
+ require.NoError(t, err)
+ ledgerService.ServeHTTP(rr, req)
+ require.Equal(t, http.StatusBadRequest, rr.Code)
+ require.Contains(t, rr.Body.String(), "missing genesisID")
+
+ // Not Testing non path var handling because I'm not convinced it's reachable given `LedgerServiceLedgerPath`
+
+ // Test Get 400 round out of range
+ rr = httptest.NewRecorder()
+ req, err = http.NewRequest("GET", fmt.Sprintf("/v1/%s/ledger/zzzzzzzzzzzzzzzzzzzzzzz", genesisID), nil)
+ require.NoError(t, err)
+ fnet.router.ServeHTTP(rr, req)
+ require.Equal(t, http.StatusBadRequest, rr.Code)
+ require.Contains(t, rr.Body.String(), "specified round number could not be parsed using base 36 : ")
+
+ // Test Get Catchpoint Not Found
+ rr = httptest.NewRecorder()
+ rnd := 1111
+ b36Rnd, err := strconv.ParseUint(fmt.Sprintf("%d", rnd), 36, 64)
+ require.NoError(t, err)
+ req, err = http.NewRequest("GET", fmt.Sprintf("/v1/%s/ledger/%d", genesisID, rnd), nil)
+ require.NoError(t, err)
+ rcs := readCloseSizer{Mock: &mock.Mock{}}
+ gcp := l.On("GetCatchpointStream", basics.Round(b36Rnd)).Return(&rcs, ledgercore.ErrNoEntry{Round: basics.Round(rnd)})
+ fnet.router.ServeHTTP(rr, req)
+ require.Equal(t, http.StatusNotFound, rr.Code)
+ require.Contains(t, rr.Body.String(), fmt.Sprintf("catchpoint file for round %d is not available", b36Rnd))
+
+ // Test Get Catchpoint Unexpected Error
+ rr = httptest.NewRecorder()
+ require.NoError(t, err)
+ req, err = http.NewRequest("GET", fmt.Sprintf("/v1/%s/ledger/%d", genesisID, rnd), nil)
+ require.NoError(t, err)
+ gcp.Unset()
+ gcp = l.On("GetCatchpointStream", basics.Round(b36Rnd)).Return(&rcs, ledgercore.ErrNoSpace)
+ fnet.router.ServeHTTP(rr, req)
+ require.Equal(t, http.StatusInternalServerError, rr.Code)
+ require.Contains(t, rr.Body.String(), fmt.Sprintf("catchpoint file for round %d could not be retrieved due to internal error : ", b36Rnd))
+
+ // Test HEAD Catchpoint 200
+ rr = httptest.NewRecorder()
+ require.NoError(t, err)
+ req, err = http.NewRequest("HEAD", fmt.Sprintf("/v1/%s/ledger/%d", genesisID, rnd), nil)
+ require.NoError(t, err)
+ gcp.Unset()
+ gcp = l.On("GetCatchpointStream", basics.Round(b36Rnd)).Return(&rcs, nil)
+ fnet.router.ServeHTTP(rr, req)
+ require.Equal(t, http.StatusOK, rr.Code)
+ require.Equal(t, LedgerResponseContentType, rr.Header().Get("Content-Type"))
+
+ // Test LedgerService Stopped
+ ledgerService.Stop()
+ require.Equal(t, int32(0), ledgerService.running)
+}
diff --git a/rpcs/msgp_gen.go b/rpcs/msgp_gen.go
index b59141c97..5f8af433f 100644
--- a/rpcs/msgp_gen.go
+++ b/rpcs/msgp_gen.go
@@ -4,6 +4,9 @@ package rpcs
import (
"github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/data/bookkeeping"
)
// The following msgp objects are implemented in this file:
@@ -14,6 +17,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> EncodedBlockCertMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -126,3 +130,9 @@ func (z *EncodedBlockCert) Msgsize() (s int) {
func (z *EncodedBlockCert) MsgIsZero() bool {
return ((*z).Block.MsgIsZero()) && ((*z).Certificate.MsgIsZero())
}
+
+// MaxSize returns a maximum valid message size for this message type
+func EncodedBlockCertMaxSize() (s int) {
+ s = 1 + 6 + bookkeeping.BlockMaxSize() + 5 + agreement.CertificateMaxSize()
+ return
+}
diff --git a/scripts/buildtools/versions b/scripts/buildtools/versions
index 01e0e9006..f2f5401fe 100644
--- a/scripts/buildtools/versions
+++ b/scripts/buildtools/versions
@@ -1,7 +1,7 @@
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
-golang.org/x/tools v0.1.5
-github.com/algorand/msgp v1.1.53
+golang.org/x/tools v0.9.3
+github.com/algorand/msgp v1.1.55
github.com/algorand/oapi-codegen v1.12.0-algorand.0
-github.com/go-swagger/go-swagger v0.25.0
-gotest.tools/gotestsum v1.6.4
-github.com/golangci/golangci-lint/cmd/golangci-lint v1.47.3
+github.com/go-swagger/go-swagger v0.30.4
+gotest.tools/gotestsum v1.10.0
+github.com/golangci/golangci-lint/cmd/golangci-lint v1.53.2
diff --git a/scripts/check_deps.sh b/scripts/check_deps.sh
index 95c7599f7..a296c11b9 100755
--- a/scripts/check_deps.sh
+++ b/scripts/check_deps.sh
@@ -73,12 +73,6 @@ check_deps() {
then
missing_dep shellcheck
fi
-
- # Don't print `sqlite3`s location.
- if ! which sqlite3 > /dev/null
- then
- missing_dep sqlite3
- fi
}
check_deps
diff --git a/scripts/configure_dev.sh b/scripts/configure_dev.sh
index df28dd5cc..474e869fb 100755
--- a/scripts/configure_dev.sh
+++ b/scripts/configure_dev.sh
@@ -77,7 +77,6 @@ elif [ "${OS}" = "darwin" ]; then
brew tap homebrew/cask
fi
install_or_upgrade pkg-config
- install_or_upgrade boost
install_or_upgrade libtool
install_or_upgrade shellcheck
if [ "${CIRCLECI}" != "true" ]; then
@@ -90,7 +89,7 @@ elif [ "${OS}" = "darwin" ]; then
lnav -i "$SCRIPTPATH/algorand_node_log.json"
fi
elif [ "${OS}" = "windows" ]; then
- if ! $msys2 pacman -S --disable-download-timeout --noconfirm git automake autoconf m4 libtool make mingw-w64-x86_64-gcc mingw-w64-x86_64-boost mingw-w64-x86_64-python mingw-w64-x86_64-jq unzip procps; then
+ if ! $msys2 pacman -S --disable-download-timeout --noconfirm git automake autoconf m4 libtool make mingw-w64-x86_64-gcc mingw-w64-x86_64-python mingw-w64-x86_64-jq unzip procps; then
echo "Error installing pacman dependencies"
exit 1
fi
diff --git a/scripts/generate_beta_config.sh b/scripts/generate_beta_config.sh
deleted file mode 100755
index 999718c9f..000000000
--- a/scripts/generate_beta_config.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-export GOPATH=$(go env GOPATH)
-SRCPATH=${GOPATH}/src/github.com/algorand/go-algorand
-cd ${SRCPATH}
-
-make
-
-CHANNEL=beta
-RECIPE=${SRCPATH}/gen/networks/betanet/source/recipe.json
-OUTPUT=${SRCPATH}/gen/networks/betanet/config
-
-rm -rf ${OUTPUT}
-
-mkdir -p ${OUTPUT}/genesisdata
-cp ${SRCPATH}/gen/betanet/* ${OUTPUT}/genesisdata/
-rm ${OUTPUT}/genesisdata/genesis.dump
-
-${GOPATH}/bin/netgoal build --recipe "${RECIPE}" -n betanet -r "${OUTPUT}" --use-existing-files --force
-
-echo Uploading configuration package for channel ${CHANNEL}
-S3_RELEASE_BUCKET=algorand-internal scripts/upload_config.sh ${OUTPUT} ${CHANNEL}
diff --git a/scripts/generate_devnet_config.sh b/scripts/generate_devnet_config.sh
deleted file mode 100755
index 77c367dc8..000000000
--- a/scripts/generate_devnet_config.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-export GOPATH=$(go env GOPATH)
-SRCPATH=$(pwd)
-
-make
-
-CHANNEL=nightly
-CONFIG=${SRCPATH}/gen/networks/devnet/source/devnet.json
-HOSTTEMPLATES=${SRCPATH}/gen/networks/devnet/source/hosttemplates.json
-TOPOLOGY=${SRCPATH}/gen/networks/devnet/source/5Hosts5Relays20Nodes.json
-OUTPUT=${SRCPATH}/gen/networks/devnet/config
-
-rm -rf ${OUTPUT}
-
-mkdir -p ${OUTPUT}/genesisdata
-cp ${SRCPATH}/gen/devnet/* ${OUTPUT}/genesisdata/
-rm ${OUTPUT}/genesisdata/genesis.dump
-
-${GOPATH}/bin/netgoal build -c "${CONFIG}" -H "${HOSTTEMPLATES}" -n devnet -t "${TOPOLOGY}" -r "${OUTPUT}" --use-existing-files --force
-
-echo Uploading configuration package for channel ${CHANNEL}
-scripts/upload_config.sh ${OUTPUT} ${CHANNEL}
diff --git a/scripts/generate_testnet_config.sh b/scripts/generate_testnet_config.sh
deleted file mode 100755
index 1ce7a4205..000000000
--- a/scripts/generate_testnet_config.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-export GOPATH=$(go env GOPATH)
-SRCPATH=$(pwd)
-
-make
-
-CHANNEL=stable
-CONFIG=${SRCPATH}/gen/networks/testnet/source/testnet.json
-HOSTTEMPLATES=${SRCPATH}/gen/networks/testnet/source/hosttemplates.json
-TOPOLOGY=${SRCPATH}/gen/networks/testnet/source/5Hosts5Relays20Nodes.json
-OUTPUT=${SRCPATH}/gen/networks/testnet/config
-NETWORK=testnet
-
-rm -rf ${OUTPUT}
-
-mkdir -p ${OUTPUT}/genesisdata
-cp ${SRCPATH}/gen/testnet/* ${OUTPUT}/genesisdata/
-rm ${OUTPUT}/genesisdata/genesis.dump
-
-${GOPATH}/bin/netgoal build -c "${CONFIG}" -H "${HOSTTEMPLATES}" -n ${NETWORK} -t "${TOPOLOGY}" -r "${OUTPUT}" --use-existing-files --force
-
-echo Uploading configuration package for channel ${CHANNEL}
-scripts/upload_config.sh ${OUTPUT} ${CHANNEL}
diff --git a/scripts/get_golang_version.sh b/scripts/get_golang_version.sh
index 7036e716a..767fb0a49 100755
--- a/scripts/get_golang_version.sh
+++ b/scripts/get_golang_version.sh
@@ -11,9 +11,9 @@
# Our build task-runner `mule` will refer to this script and will automatically
# build a new image whenever the version number has been changed.
-BUILD=1.17.13
- MIN=1.17
- GO_MOD_SUPPORT=1.17
+BUILD=1.20.5
+ MIN=1.20
+ GO_MOD_SUPPORT=1.20
if [ "$1" = all ]
then
diff --git a/scripts/install_linux_deps.sh b/scripts/install_linux_deps.sh
index a51fdf7b5..97d4ab2cd 100755
--- a/scripts/install_linux_deps.sh
+++ b/scripts/install_linux_deps.sh
@@ -4,9 +4,9 @@ set -e
. /etc/os-release
DISTRIB=$ID
-ARCH_DEPS="boost boost-libs expect jq autoconf shellcheck sqlite python-virtualenv"
-UBUNTU_DEPS="libtool libboost-math-dev expect jq autoconf shellcheck sqlite3 python3-venv build-essential"
-FEDORA_DEPS="boost-devel expect jq autoconf ShellCheck sqlite python-virtualenv"
+ARCH_DEPS="expect jq autoconf shellcheck sqlite python-virtualenv"
+UBUNTU_DEPS="libtool expect jq autoconf shellcheck sqlite3 python3-venv build-essential"
+FEDORA_DEPS="expect jq autoconf ShellCheck sqlite python-virtualenv"
case $DISTRIB in
"arch" | "manjaro")
diff --git a/scripts/release/README.md b/scripts/release/README.md
index 4639743e7..f3fa543f1 100644
--- a/scripts/release/README.md
+++ b/scripts/release/README.md
@@ -52,7 +52,7 @@ This section briefly describes the expected outcomes of the current build pipeli
1. Build (compile) the binaries in a Centos 7 & 8 docker container that will then be used by both `deb` and `rpm` packaging.
- 1. Docker containers will package `deb` and `rpm` artifacts inside of Ubuntu 18.04 and Centos 7 & 8, respectively.
+ 1. Docker containers will package `deb` and `rpm` artifacts inside of Ubuntu 20.04 and Centos 7 & 8, respectively.
 1. Jenkins will then pause to wait for [the only manual part of the build/package/test phase], which is to forward the `gpg-agent` that establishes a direct connection between the local machine that contains the signing keys and the remote ec2 instance.
@@ -62,7 +62,7 @@ This section briefly describes the expected outcomes of the current build pipeli
Download the `deb` and `rpm` packages from staging and test and locally.
- This will spin up a local python server that will host both an `APT` repository and a `YUM` repository in Ubuntu 18.04 and Centos 7 docker containers, respectively.
+ This will spin up a local python server that will host both an `APT` repository and a `YUM` repository in Ubuntu and Centos docker containers, respectively.
Each package is downloaded in the respective environment and the following tests are done:
@@ -71,9 +71,9 @@ This section briefly describes the expected outcomes of the current build pipeli
+ This is done for the following docker containers:
- centos:7
- quay.io/centos/centos:stream8
- - fedora:28
- - ubuntu:16.04
- - ubuntu:18.04
+ - fedora:38
+ - ubuntu:20.04
+ - ubuntu:22.04
- Creates a test network using `goal`.
- et. al.
diff --git a/scripts/release/common/docker/centos.Dockerfile b/scripts/release/common/docker/centos.Dockerfile
index eeed8f114..a23b446ca 100644
--- a/scripts/release/common/docker/centos.Dockerfile
+++ b/scripts/release/common/docker/centos.Dockerfile
@@ -2,7 +2,7 @@ FROM centos:7
WORKDIR /root
RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
-RUN yum install -y autoconf awscli curl git gnupg2 nfs-utils python36 sqlite3 boost-devel expect jq libtool gcc-c++ libstdc++-devel libstdc++-static rpmdevtools createrepo rpm-sign bzip2 which ShellCheck
+RUN yum install -y autoconf awscli curl git gnupg2 nfs-utils python36 expect jq libtool gcc-c++ libstdc++-devel libstdc++-static rpmdevtools createrepo rpm-sign bzip2 which ShellCheck
ENTRYPOINT ["/bin/bash"]
diff --git a/scripts/release/common/docker/centos8.Dockerfile b/scripts/release/common/docker/centos8.Dockerfile
index 26be88df8..cf5474cfe 100644
--- a/scripts/release/common/docker/centos8.Dockerfile
+++ b/scripts/release/common/docker/centos8.Dockerfile
@@ -2,7 +2,7 @@ FROM quay.io/centos/centos:stream8
WORKDIR /root
RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
- dnf install -y autoconf awscli curl git gnupg2 nfs-utils python36 sqlite boost-devel expect jq libtool gcc-c++ libstdc++-devel rpmdevtools createrepo rpm-sign bzip2 which && \
+ dnf install -y autoconf awscli curl git gnupg2 nfs-utils python36 expect jq libtool gcc-c++ libstdc++-devel rpmdevtools createrepo rpm-sign bzip2 which && \
dnf -y --enablerepo=powertools install libstdc++-static
RUN echo "${BOLD}Downloading and installing binaries...${RESET}" && \
diff --git a/scripts/release/common/docker/setup.Dockerfile b/scripts/release/common/docker/setup.Dockerfile
index b2c15664b..7d2988ca9 100644
--- a/scripts/release/common/docker/setup.Dockerfile
+++ b/scripts/release/common/docker/setup.Dockerfile
@@ -9,7 +9,7 @@
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=869194
# https://github.com/boto/s3transfer/pull/102
-FROM ubuntu:18.04
+FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y jq git python python-pip python3-boto3 ssh && \
pip install awscli
diff --git a/scripts/release/common/setup.sh b/scripts/release/common/setup.sh
index 13a0548d9..75683262d 100755
--- a/scripts/release/common/setup.sh
+++ b/scripts/release/common/setup.sh
@@ -25,7 +25,7 @@ sudo apt-get upgrade -y
# `apt-get` fails randomly when downloading package, this is a hack that "works" reasonably well.
sudo apt-get update
-sudo apt-get install -y build-essential automake autoconf awscli docker.io git gpg nfs-common python python3 rpm sqlite3 python3-boto3 g++ libtool rng-tools
+sudo apt-get install -y build-essential automake autoconf awscli docker.io git gpg nfs-common python python3 rpm python3-boto3 g++ libtool rng-tools
sudo rngd -r /dev/urandom
#umask 0077
@@ -106,7 +106,7 @@ fi
sudo usermod -a -G docker ubuntu
sg docker "docker pull centos:7"
sg docker "docker pull quay.io/centos/centos:stream8"
-sg docker "docker pull ubuntu:18.04"
+sg docker "docker pull ubuntu:22.04"
cat << EOF >> "${HOME}/.bashrc"
export EDITOR=vi
diff --git a/scripts/release/test/util/test_package.sh b/scripts/release/test/util/test_package.sh
index 65c4ab022..61c93b84b 100755
--- a/scripts/release/test/util/test_package.sh
+++ b/scripts/release/test/util/test_package.sh
@@ -10,9 +10,9 @@ set -ex
OS_LIST=(
centos:7
quay.io/centos/centos:stream8
- fedora:28
- ubuntu:16.04
- ubuntu:18.04
+ fedora:38
+ ubuntu:20.04
+ ubuntu:22.04
)
FAILED=()
diff --git a/scripts/travis/codegen_verification.sh b/scripts/travis/codegen_verification.sh
index 356b8444e..53ad607a4 100755
--- a/scripts/travis/codegen_verification.sh
+++ b/scripts/travis/codegen_verification.sh
@@ -4,7 +4,7 @@
#
# Syntax: codegen_verification.sh
#
-# Usage: Can be used by either Travis or an ephermal build machine
+# Usage: Can be used by either Travis or an ephemeral build machine
#
# Examples: scripts/travis/codegen_verification.sh
set -e
diff --git a/scripts/travis/configure_dev.sh b/scripts/travis/configure_dev.sh
index a45a5a713..82cc9c25d 100755
--- a/scripts/travis/configure_dev.sh
+++ b/scripts/travis/configure_dev.sh
@@ -18,14 +18,6 @@ if [[ "${OS}" == "linux" ]]; then
sudo apt-get update -y
sudo apt-get -y install sqlite3
fi
-elif [[ "${OS}" == "darwin" ]]; then
- # we don't want to upgrade boost if we already have it, as it will try to update
- # other components.
- if [ "${CIRCLECI}" != "true" ]; then
- brew update
- brew tap homebrew/cask
- brew pin boost || true
- fi
elif [[ "${OS}" == "windows" ]]; then
git config --global core.autocrlf true
# Golang probably is not installed under MSYS2 so add the environment variable temporarily
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index ea9d74cc0..0471d84be 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -197,9 +197,9 @@ func (pps *WorkerState) ensureAccounts(ac *libgoal.Client) (err error) {
srcAcctPresent = true
}
- ai, err := ac.AccountInformation(addr, true)
- if err != nil {
- return err
+ ai, aiErr := ac.AccountInformation(addr, true)
+ if aiErr != nil {
+ return aiErr
}
amt := ai.Amount
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index 32352bcfc..112105679 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -295,17 +295,19 @@ func (pps *WorkerState) scheduleAction() bool {
}
pps.refreshPos = 0
}
- addr := pps.refreshAddrs[pps.refreshPos]
- ai, err := pps.client.AccountInformation(addr, true)
- if err == nil {
- ppa := pps.accounts[addr]
+ if pps.cfg.NumApp > 0 || pps.cfg.NumAsset > 0 {
+ addr := pps.refreshAddrs[pps.refreshPos]
+ ai, err := pps.client.AccountInformation(addr, true)
+ if err == nil {
+ ppa := pps.accounts[addr]
- pps.integrateAccountInfo(addr, ppa, ai)
- } else {
- if !pps.cfg.Quiet {
- fmt.Printf("background refresh err: %v\n", err)
+ pps.integrateAccountInfo(addr, ppa, ai)
+ } else {
+ if !pps.cfg.Quiet {
+ fmt.Printf("background refresh err: %v\n", err)
+ }
+ return false
}
- return false
}
pps.refreshPos++
return true
diff --git a/stateproof/README.md b/stateproof/README.md
new file mode 100644
index 000000000..c1a2115ae
--- /dev/null
+++ b/stateproof/README.md
@@ -0,0 +1,46 @@
+# StateProof
+
+## Background
+
+A State Proof is a cryptographic proof of state changes that occur in a given set of blocks. State Proofs are created and signed by the network.
+The same participants that reach consensus on new blocks sign a message attesting to a summary of recent Algorand transactions.
+These signatures are then compressed into a compact certificate of collective knowledge, also known as a State Proof.
+After a State Proof is created, a State Proof transaction, which includes the State Proof and the message it proves, is created and sent to the Algorand network for validation.
+The transaction goes through consensus like any other pending Algorand transaction: it gets validated by participation nodes, included in a block proposal, and written to the blockchain.
+
+The crypto package implements the cryptography behind State Proofs. This package, stateproof, implements the orchestration logic for State Proofs.
+Specifically, it is responsible for the following:
+- Producing signatures for State Proof messages for online accounts.
+- Collecting signatures in order to create a State Proof.
+- Gathering block information and online account balances from the ledger.
+- Generating the State Proof transactions
+
+
+## State Proof Modules
+
+- `verify` is responsible for making a decision on whether or not to accept a State Proof for the round
+  in which it was proposed. The network aims to accept the most compact State Proof
+ it can produce while also producing a State Proof every `StateProofInterval` rounds.
+ For this reason, the network might not accept a valid State Proof when there is a chance a better (more compact) State Proof could be produced.
+- `Signer` A go-routine that is triggered on every new block.
+ - Generates the State Proof message when needed.
+ - Signs the message using every online account's private State Proof key.
+ - Persists signatures into the State Proof database.
+- `Builder` A go-routine that is triggered on every new block.
+ - Broadcasts participants' signatures over gossip. In order to prevent network congestion, every address has a designated round slot
+ in which it can send its signature.
+ - Creates a State Proof transaction and passes it to the transaction pool once enough SignedWeight is collected. It does this by
+ keeping track of `stateproof.Prover` for every target State Proof round.
+  - Responsible for removing the `stateproof.Prover` data structure, signatures, and ephemeral keys once the relevant State Proof is committed.
+
+ In addition, the `Builder` module implements the signature verification handling procedure. A relay invokes this procedure on every signature it receives
+ to make sure that it collects only valid signatures for the State Proof.
+
+## State Proof Chain Liveness
+
+The Algorand ledger only stores a limited number of historical blocks and online account balances (needed for the creation of State Proofs). If the State Proof
+chain were to lag behind regular consensus, this could theoretically make it impossible to create new State Proofs. For this reason, the `Builder`
+maintains its own database and backs-up necessary data from the ledger so that it can create State Proofs even if the ledger is far ahead.
+
+In catchup scenarios, the `Builder` gets notified (`OnPrepareVoterCommit`) before the ledger removes data and stores it into the State Proof database.
+
diff --git a/stateproof/builder.go b/stateproof/builder.go
index 28d5d2b47..13f4aa5b9 100644
--- a/stateproof/builder.go
+++ b/stateproof/builder.go
@@ -22,7 +22,6 @@ import (
"encoding/binary"
"errors"
"fmt"
- "sort"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto/stateproof"
@@ -35,6 +34,8 @@ import (
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/stateproof/verify"
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
)
var errVotersNotTracked = errors.New("voters not tracked for the given lookback round")
@@ -270,9 +271,9 @@ func (spw *Worker) getAllOnlineProverRounds() ([]basics.Round, error) {
var rnds []basics.Round
err = spw.db.Atomic(func(_ context.Context, tx *sql.Tx) error {
- var err error
- rnds, err = getSignatureRounds(tx, threshold, latestStateProofRound)
- return err
+ var err2 error
+ rnds, err2 = getSignatureRounds(tx, threshold, latestStateProofRound)
+ return err2
})
return rnds, err
@@ -641,11 +642,8 @@ func (spw *Worker) tryBroadcast() {
spw.mu.Lock()
defer spw.mu.Unlock()
- sortedRounds := make([]basics.Round, 0, len(spw.provers))
- for rnd := range spw.provers {
- sortedRounds = append(sortedRounds, rnd)
- }
- sort.Slice(sortedRounds, func(i, j int) bool { return sortedRounds[i] < sortedRounds[j] })
+ sortedRounds := maps.Keys(spw.provers)
+ slices.Sort(sortedRounds)
for _, rnd := range sortedRounds {
 	// Iterate over the provers in a sequential manner. If the earliest state proof is not ready/rejected
diff --git a/stateproof/db.go b/stateproof/db.go
index f0b0e5cc1..46618a1f7 100644
--- a/stateproof/db.go
+++ b/stateproof/db.go
@@ -183,7 +183,7 @@ func rowsToPendingSigs(rows *sql.Rows) (map[basics.Round][]pendingSig, error) {
//#endregion
-//#region Prover Operations
+// #region Prover Operations
func persistProver(tx *sql.Tx, rnd basics.Round, b *spProver) error {
_, err := tx.Exec(insertOrReplaceProverForRound, rnd, protocol.Encode(b))
return err
diff --git a/stateproof/msgp_gen.go b/stateproof/msgp_gen.go
index db80b731a..11ad40933 100644
--- a/stateproof/msgp_gen.go
+++ b/stateproof/msgp_gen.go
@@ -7,7 +7,11 @@ import (
"github.com/algorand/msgp/msgp"
+ "github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
)
// The following msgp objects are implemented in this file:
@@ -18,6 +22,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SigFromAddrMaxSize()
//
// spProver
// |-----> (*) MarshalMsg
@@ -26,6 +31,7 @@ import (
// |-----> (*) CanUnmarshalMsg
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
+// |-----> SpProverMaxSize()
//
// MarshalMsg implements msgp.Marshaler
@@ -180,6 +186,12 @@ func (z *sigFromAddr) MsgIsZero() bool {
return ((*z).SignerAddress.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).Sig.MsgIsZero())
}
+// MaxSize returns a maximum valid message size for this message type
+func SigFromAddrMaxSize() (s int) {
+ s = 1 + 2 + basics.AddressMaxSize() + 2 + basics.RoundMaxSize() + 2 + merklesignature.SignatureMaxSize()
+ return
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *spProver) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
@@ -213,7 +225,7 @@ func (z *spProver) MarshalMsg(b []byte) (o []byte) {
} else {
o = msgp.AppendMapHeader(o, uint32(len((*z).AddrToPos)))
}
- zb0001_keys := make([]Address, 0, len((*z).AddrToPos))
+ zb0001_keys := make([]basics.Address, 0, len((*z).AddrToPos))
for zb0001 := range (*z).AddrToPos {
zb0001_keys = append(zb0001_keys, zb0001)
}
@@ -305,7 +317,7 @@ func (z *spProver) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).AddrToPos = make(map[Address]uint64, zb0005)
}
for zb0005 > 0 {
- var zb0001 Address
+ var zb0001 basics.Address
var zb0002 uint64
zb0005--
bts, err = zb0001.UnmarshalMsg(bts)
@@ -396,7 +408,7 @@ func (z *spProver) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).AddrToPos = make(map[Address]uint64, zb0007)
}
for zb0007 > 0 {
- var zb0001 Address
+ var zb0001 basics.Address
var zb0002 uint64
zb0007--
bts, err = zb0001.UnmarshalMsg(bts)
@@ -465,3 +477,17 @@ func (z *spProver) Msgsize() (s int) {
func (z *spProver) MsgIsZero() bool {
return ((*z).Prover == nil) && (len((*z).AddrToPos) == 0) && ((*z).VotersHdr.MsgIsZero()) && ((*z).Message.MsgIsZero())
}
+
+// MaxSize returns a maximum valid message size for this message type
+func SpProverMaxSize() (s int) {
+ s = 1 + 4
+ s += stateproof.ProverMaxSize()
+ s += 5
+ s += msgp.MapHeaderSize
+ // Adding size of map keys for z.AddrToPos
+ s += stateproof.VotersAllocBound * (basics.AddressMaxSize())
+ // Adding size of map values for z.AddrToPos
+ s += stateproof.VotersAllocBound * (msgp.Uint64Size)
+ s += 4 + bookkeeping.BlockHeaderMaxSize() + 4 + stateproofmsg.MessageMaxSize()
+ return
+}
diff --git a/stateproof/stateproofMessageGenerator.go b/stateproof/stateproofMessageGenerator.go
index 3b5008f2a..c51d76719 100644
--- a/stateproof/stateproofMessageGenerator.go
+++ b/stateproof/stateproofMessageGenerator.go
@@ -35,6 +35,7 @@ var errOutOfBound = errors.New("request pos is out of array bounds")
var errProvenWeightOverflow = errors.New("overflow computing provenWeight")
// The Array implementation for block headers, required to build the merkle tree from them.
+//
//msgp:ignore lightBlockHeaders
type lightBlockHeaders []bookkeeping.LightBlockHeader
diff --git a/stateproof/worker.go b/stateproof/worker.go
index acc55a5d6..e9aa84f85 100644
--- a/stateproof/worker.go
+++ b/stateproof/worker.go
@@ -161,6 +161,8 @@ func (spw *Worker) Stop() {
// SortAddress implements sorting by Address keys for
// canonical encoding of maps in msgpack format.
+//
+//msgp:sort basics.Address SortAddress
type SortAddress = basics.SortAddress
// Address is required for the msgpack sort binding, since it looks for Address and not basics.Address
diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go
index 5d4e9b194..ecbecbdb8 100644
--- a/test/e2e-go/features/catchup/catchpointCatchup_test.go
+++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go
@@ -28,54 +28,52 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-deadlock"
+
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/daemon/algod/api/client"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/node"
"github.com/algorand/go-algorand/nodecontrol"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
"github.com/algorand/go-algorand/test/partitiontest"
- "github.com/algorand/go-deadlock"
)
const basicTestCatchpointInterval = 4
-func waitForCatchpointGeneration(fixture *fixtures.RestClientFixture, client client.RestClient, catchpointRound basics.Round) (string, error) {
+func waitForCatchpointGeneration(t *testing.T, fixture *fixtures.RestClientFixture, client client.RestClient, catchpointRound basics.Round) string {
err := fixture.ClientWaitForRoundWithTimeout(client, uint64(catchpointRound+1))
if err != nil {
- return "", err
+ return ""
}
+ var round basics.Round
var status model.NodeStatusResponse
- timer := time.NewTimer(10 * time.Second)
- for {
+ catchpointConfirmed := false
+ for i := 0; i < 1000; i++ {
status, err = client.Status()
- if err != nil {
- return "", err
- }
-
- var round basics.Round
+ require.NoError(t, err)
if status.LastCatchpoint != nil && len(*status.LastCatchpoint) > 0 {
round, _, err = ledgercore.ParseCatchpointLabel(*status.LastCatchpoint)
- if err != nil {
- return "", err
- }
+ require.NoError(t, err)
if round >= catchpointRound {
+ catchpointConfirmed = true
+ if i > 80 {
+ fmt.Printf("%s: waited for catchpont for %d sec\n", t.Name(), (i*250)/1000)
+ }
break
}
}
- select {
- case <-timer.C:
- return "", fmt.Errorf("timeout while waiting for catchpoint, target: %d, got %d", catchpointRound, round)
- default:
- time.Sleep(250 * time.Millisecond)
- }
+ time.Sleep(250 * time.Millisecond)
}
-
- return *status.LastCatchpoint, nil
+ if !catchpointConfirmed {
+ require.Failf(t, "timeout waiting on a catchpoint", "target: %d, got %d", catchpointRound, round)
+ }
+ return *status.LastCatchpoint
}
func denyRoundRequestsWebProxy(a *require.Assertions, listeningAddress string, round basics.Round) *fixtures.WebProxy {
@@ -277,6 +275,51 @@ func getFixture(consensusParams *config.ConsensusParams) *fixtures.RestClientFix
return &fixture
}
+func TestCatchpointCatchupFailure(t *testing.T) {
+ // Overview of this test:
+ // Start a two-node network (primary has 100%, using has 0%)
+ // create a web proxy, have the using node use it as a peer, blocking all requests for round #2. ( and allowing everything else )
+ // Let it run until the first usable catchpoint, as computed in getFirstCatchpointRound, is generated.
+ // Shut down the primary node so that using node will have no peers for catchpoint catchup.
+ // Instruct the using node to catchpoint catchup from the proxy.
+ // Make sure starting the catchpoint service returns an error.
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ consensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ applyCatchpointConsensusChanges(&consensusParams)
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ fixture := getFixture(&consensusParams)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "CatchpointCatchupTestNetwork.json"))
+
+ primaryNode, primaryNodeRestClient, primaryErrorsCollector := startCatchpointGeneratingNode(a, fixture, "Primary")
+ defer primaryNode.StopAlgod()
+
+ primaryNodeAddr, err := primaryNode.GetListeningAddress()
+ a.NoError(err)
+
+ usingNode, usingNodeRestClient, wp, usingNodeErrorsCollector := startCatchpointUsingNode(a, fixture, "Node", primaryNodeAddr)
+ defer usingNodeErrorsCollector.Print()
+ defer wp.Close()
+ defer usingNode.StopAlgod()
+
+ targetCatchpointRound := getFirstCatchpointRound(&consensusParams)
+
+ catchpointLabel := waitForCatchpointGeneration(t, fixture, primaryNodeRestClient, targetCatchpointRound)
+
+ primaryErrorsCollector.Print()
+ err = primaryNode.StopAlgod()
+ a.NoError(err)
+
+ _, err = usingNodeRestClient.Catchup(catchpointLabel)
+ a.ErrorContains(err, node.MakeStartCatchpointError(catchpointLabel, fmt.Errorf("")).Error())
+}
+
func TestBasicCatchpointCatchup(t *testing.T) {
// Overview of this test:
// Start a two-node network (primary has 100%, using has 0%)
@@ -313,8 +356,7 @@ func TestBasicCatchpointCatchup(t *testing.T) {
targetCatchpointRound := getFirstCatchpointRound(&consensusParams)
- catchpointLabel, err := waitForCatchpointGeneration(fixture, primaryNodeRestClient, targetCatchpointRound)
- a.NoError(err)
+ catchpointLabel := waitForCatchpointGeneration(t, fixture, primaryNodeRestClient, targetCatchpointRound)
_, err = usingNodeRestClient.Catchup(catchpointLabel)
a.NoError(err)
@@ -448,9 +490,6 @@ func TestNodeTxHandlerRestart(t *testing.T) {
// prepare it's configuration file to set it to generate a catchpoint every 16 rounds.
cfg, err := config.LoadConfigFromDisk(primaryNode.GetDataDir())
a.NoError(err)
- const catchpointInterval = 16
- cfg.CatchpointInterval = catchpointInterval
- cfg.CatchpointTracking = 2
cfg.MaxAcctLookback = 2
cfg.Archival = false
@@ -461,6 +500,9 @@ func TestNodeTxHandlerRestart(t *testing.T) {
cfg, err = config.LoadConfigFromDisk(relayNode.GetDataDir())
a.NoError(err)
+ const catchpointInterval = 16
+ cfg.CatchpointInterval = catchpointInterval
+ cfg.CatchpointTracking = 2
cfg.TxSyncIntervalSeconds = 200000 // disable txSync
cfg.SaveToDisk(relayNode.GetDataDir())
@@ -469,6 +511,8 @@ func TestNodeTxHandlerRestart(t *testing.T) {
client1 := fixture.GetLibGoalClientFromNodeController(primaryNode)
client2 := fixture.GetLibGoalClientFromNodeController(secondNode)
+ relayClient := fixture.GetAlgodClientForController(relayNode)
+
wallet1, err := client1.GetUnencryptedWalletHandle()
a.NoError(err)
wallet2, err := client2.GetUnencryptedWalletHandle()
@@ -487,32 +531,10 @@ func TestNodeTxHandlerRestart(t *testing.T) {
a.NoError(err)
targetCatchpointRound := status.LastRound
- // ensure the catchpoint is created for targetCatchpointRound
- timer := time.NewTimer(100 * time.Second)
-outer:
- for {
- status, err = client1.Status()
- a.NoError(err)
-
- var round basics.Round
- if status.LastCatchpoint != nil && len(*status.LastCatchpoint) > 0 {
- round, _, err = ledgercore.ParseCatchpointLabel(*status.LastCatchpoint)
- a.NoError(err)
- if uint64(round) >= targetCatchpointRound {
- break
- }
- }
- select {
- case <-timer.C:
- a.Failf("timeout waiting a catchpoint", "target: %d, got %d", targetCatchpointRound, round)
- break outer
- default:
- time.Sleep(250 * time.Millisecond)
- }
- }
+ lastCatchpoint := waitForCatchpointGeneration(t, &fixture, relayClient, basics.Round(targetCatchpointRound))
// let the primary node catchup
- err = client1.Catchup(*status.LastCatchpoint)
+ err = client1.Catchup(lastCatchpoint)
a.NoError(err)
status1, err := client1.Status()
@@ -573,9 +595,6 @@ func TestReadyEndpoint(t *testing.T) {
// prepare its configuration file to set it to generate a catchpoint every 16 rounds.
cfg, err := config.LoadConfigFromDisk(primaryNode.GetDataDir())
a.NoError(err)
- const catchpointInterval = 16
- cfg.CatchpointInterval = catchpointInterval
- cfg.CatchpointTracking = 2
cfg.MaxAcctLookback = 2
cfg.Archival = false
cfg.TxSyncIntervalSeconds = 200000 // disable txSync
@@ -587,6 +606,9 @@ func TestReadyEndpoint(t *testing.T) {
cfg, err = config.LoadConfigFromDisk(relayNode.GetDataDir())
a.NoError(err)
+ const catchpointInterval = 16
+ cfg.CatchpointInterval = catchpointInterval
+ cfg.CatchpointTracking = 2
cfg.TxSyncIntervalSeconds = 200000 // disable txSync
cfg.SaveToDisk(relayNode.GetDataDir())
@@ -595,6 +617,7 @@ func TestReadyEndpoint(t *testing.T) {
client1 := fixture.GetLibGoalClientFromNodeController(primaryNode)
client2 := fixture.GetLibGoalClientFromNodeController(secondNode)
+ relayClient := fixture.GetAlgodClientForController(relayNode)
wallet1, err := client1.GetUnencryptedWalletHandle()
a.NoError(err)
wallet2, err := client2.GetUnencryptedWalletHandle()
@@ -614,28 +637,7 @@ func TestReadyEndpoint(t *testing.T) {
targetCatchpointRound := status.LastRound
// ensure the catchpoint is created for targetCatchpointRound
- timer := time.NewTimer(100 * time.Second)
-outer:
- for {
- status, err = client1.Status()
- a.NoError(err)
-
- var round basics.Round
- if status.LastCatchpoint != nil && len(*status.LastCatchpoint) > 0 {
- round, _, err = ledgercore.ParseCatchpointLabel(*status.LastCatchpoint)
- a.NoError(err)
- if uint64(round) >= targetCatchpointRound {
- break
- }
- }
- select {
- case <-timer.C:
- a.Failf("timeout waiting a catchpoint", "target: %d, got %d", targetCatchpointRound, round)
- break outer
- default:
- time.Sleep(250 * time.Millisecond)
- }
- }
+ lastCatchpoint := waitForCatchpointGeneration(t, &fixture, relayClient, basics.Round(targetCatchpointRound))
//////////
// NOTE //
@@ -645,7 +647,7 @@ outer:
// Then when the primary node is at target round, it should satisfy ready 200 condition
// let the primary node catchup
- err = client1.Catchup(*status.LastCatchpoint)
+ err = client1.Catchup(lastCatchpoint)
a.NoError(err)
// The primary node is catching up with its previous catchpoint
@@ -666,7 +668,7 @@ outer:
// The primary node has reached the target round,
// - the sync-time (aka catchup time should be 0.0)
// - the catchpoint should be empty (len == 0)
- timer = time.NewTimer(100 * time.Second)
+ timer := time.NewTimer(100 * time.Second)
for {
err = primaryNodeRestClient.ReadyCheck()
@@ -732,9 +734,6 @@ func TestNodeTxSyncRestart(t *testing.T) {
// prepare it's configuration file to set it to generate a catchpoint every 16 rounds.
cfg, err := config.LoadConfigFromDisk(primaryNode.GetDataDir())
a.NoError(err)
- const catchpointInterval = 16
- cfg.CatchpointInterval = catchpointInterval
- cfg.CatchpointTracking = 2
cfg.MaxAcctLookback = 2
cfg.Archival = false
@@ -746,6 +745,9 @@ func TestNodeTxSyncRestart(t *testing.T) {
cfg, err = config.LoadConfigFromDisk(relayNode.GetDataDir())
a.NoError(err)
+ const catchpointInterval = 16
+ cfg.CatchpointInterval = catchpointInterval
+ cfg.CatchpointTracking = 2
cfg.TxSyncIntervalSeconds = 4
cfg.SaveToDisk(relayNode.GetDataDir())
@@ -754,6 +756,7 @@ func TestNodeTxSyncRestart(t *testing.T) {
client1 := fixture.GetLibGoalClientFromNodeController(primaryNode)
client2 := fixture.GetLibGoalClientFromNodeController(secondNode)
+ relayClient := fixture.GetAlgodClientForController(relayNode)
wallet1, err := client1.GetUnencryptedWalletHandle()
a.NoError(err)
wallet2, err := client2.GetUnencryptedWalletHandle()
@@ -773,28 +776,7 @@ func TestNodeTxSyncRestart(t *testing.T) {
targetCatchpointRound := status.LastRound
// ensure the catchpoint is created for targetCatchpointRound
- timer := time.NewTimer(100 * time.Second)
-outer:
- for {
- status, err = client1.Status()
- a.NoError(err)
-
- var round basics.Round
- if status.LastCatchpoint != nil && len(*status.LastCatchpoint) > 0 {
- round, _, err = ledgercore.ParseCatchpointLabel(*status.LastCatchpoint)
- a.NoError(err)
- if uint64(round) >= targetCatchpointRound {
- break
- }
- }
- select {
- case <-timer.C:
- a.Failf("timeout waiting a catchpoint", "target: %d, got %d", targetCatchpointRound, round)
- break outer
- default:
- time.Sleep(250 * time.Millisecond)
- }
- }
+ lastCatchpoint := waitForCatchpointGeneration(t, &fixture, relayClient, basics.Round(targetCatchpointRound))
// stop the primary node
client1.FullStop()
@@ -807,7 +789,7 @@ outer:
_, err = fixture.StartNode(primaryNode.GetDataDir())
a.NoError(err)
// let the primary node catchup
- err = client1.Catchup(*status.LastCatchpoint)
+ err = client1.Catchup(lastCatchpoint)
a.NoError(err)
// the transaction should not be confirmed yet
diff --git a/test/e2e-go/features/catchup/stateproofsCatchup_test.go b/test/e2e-go/features/catchup/stateproofsCatchup_test.go
index a0d20f5f1..7f11ca37f 100644
--- a/test/e2e-go/features/catchup/stateproofsCatchup_test.go
+++ b/test/e2e-go/features/catchup/stateproofsCatchup_test.go
@@ -91,8 +91,7 @@ func TestStateProofInReplayCatchpoint(t *testing.T) {
targetCatchpointRound := getFirstCatchpointRound(&consensusParams)
- catchpointLabel, err := waitForCatchpointGeneration(fixture, primaryNodeRestClient, targetCatchpointRound)
- a.NoError(err)
+ catchpointLabel := waitForCatchpointGeneration(t, fixture, primaryNodeRestClient, targetCatchpointRound)
_, err = usingNodeRestClient.Catchup(catchpointLabel)
a.NoError(err)
@@ -168,8 +167,7 @@ func TestStateProofAfterCatchpoint(t *testing.T) {
targetCatchpointRound := getFirstCatchpointRound(&consensusParams)
- catchpointLabel, err := waitForCatchpointGeneration(fixture, primaryNodeRestClient, targetCatchpointRound)
- a.NoError(err)
+ catchpointLabel := waitForCatchpointGeneration(t, fixture, primaryNodeRestClient, targetCatchpointRound)
_, err = usingNodeRestClient.Catchup(catchpointLabel)
a.NoError(err)
@@ -259,8 +257,7 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) {
targetCatchpointRound := getFirstCatchpointRound(&consensusParams)
- catchpointLabel, err := waitForCatchpointGeneration(&fixture, primaryNodeRestClient, targetCatchpointRound)
- a.NoError(err)
+ catchpointLabel := waitForCatchpointGeneration(t, &fixture, primaryNodeRestClient, targetCatchpointRound)
_, err = usingNodeRestClient.Catchup(catchpointLabel)
a.NoError(err)
diff --git a/test/e2e-go/features/devmode/devmode_test.go b/test/e2e-go/features/devmode/devmode_test.go
index 728155ad3..5c2545a33 100644
--- a/test/e2e-go/features/devmode/devmode_test.go
+++ b/test/e2e-go/features/devmode/devmode_test.go
@@ -75,3 +75,42 @@ func TestDevMode(t *testing.T) {
prevTime = currTime
}
}
+
+// Starts up a devmode network, sends a txn, and fetches the txn group delta for that txn
+func TestTxnGroupDeltasDevMode(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ // Start devmode network, and send a transaction.
+ var fixture fixtures.RestClientFixture
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "DevModeTxnTracerNetwork.json"))
+ fixture.Start()
+ defer fixture.Shutdown()
+ sender, err := fixture.GetRichestAccount()
+ require.NoError(t, err)
+ key := crypto.GenerateSignatureSecrets(crypto.Seed{})
+ receiver := basics.Address(key.SignatureVerifier)
+ txn := fixture.SendMoneyAndWait(0, 100000, 1000, sender.Address, receiver.String(), "")
+ require.NotNil(t, txn.ConfirmedRound)
+ _, err = fixture.AlgodClient.Block(*txn.ConfirmedRound)
+ require.NoError(t, err)
+
+ // Test GetLedgerStateDeltaForTransactionGroup and verify the response contains a delta
+ txngroupResponse, err := fixture.AlgodClient.GetLedgerStateDeltaForTransactionGroup(txn.Txn.ID().String())
+ require.NoError(t, err)
+ require.True(t, len(txngroupResponse) > 0)
+
+ // Test GetTransactionGroupLedgerStateDeltasForRound and verify the response contains the delta for our txn
+ roundResponse, err := fixture.AlgodClient.GetTransactionGroupLedgerStateDeltasForRound(1)
+ require.NoError(t, err)
+ require.Equal(t, len(roundResponse.Deltas), 1)
+ groupDelta := roundResponse.Deltas[0]
+ require.Equal(t, 1, len(groupDelta.Ids))
+ require.Equal(t, groupDelta.Ids[0], txn.Txn.ID().String())
+
+ // Assert that the TxIDs field across both endpoint responses is the same
+ require.Equal(t, txngroupResponse["Txids"], groupDelta.Delta["Txids"])
+}
diff --git a/test/e2e-go/features/followerNode/syncDeltas_test.go b/test/e2e-go/features/followernode/syncDeltas_test.go
index 2a8d2b961..bd3d8e0d8 100644
--- a/test/e2e-go/features/followerNode/syncDeltas_test.go
+++ b/test/e2e-go/features/followernode/syncDeltas_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package followerNode
+package followernode
import (
"path/filepath"
diff --git a/test/e2e-go/features/followerNode/syncRestart_test.go b/test/e2e-go/features/followernode/syncRestart_test.go
index 8137f9e24..9bdd686f4 100644
--- a/test/e2e-go/features/followerNode/syncRestart_test.go
+++ b/test/e2e-go/features/followernode/syncRestart_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package followerNode
+package followernode
import (
"path/filepath"
diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
index 767ee86b3..18c985c20 100644
--- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
+++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
@@ -104,9 +104,9 @@ func waitForAccountToProposeBlock(a *require.Assertions, fixture *fixtures.RestC
}
// TestNewAccountCanGoOnlineAndParticipate tests two behaviors:
-// - When the account does not have enough stake, or after receivning algos, but before the lookback rounds,
-// it should not be proposing blocks
-// - When the account balance receives enough stake, it should be proposing after lookback rounds
+// -  When the account does not have enough stake, or after receiving algos, but before the lookback rounds,
+// it should not be proposing blocks
+// - When the account balance receives enough stake, it should be proposing after lookback rounds
func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) {
partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
diff --git a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
index 7565a821a..b6df5b441 100644
--- a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
+++ b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
@@ -44,7 +44,8 @@ import (
// ((Network Round - 1) Mod 10) = nodeIdx and nodeIdx is used to pull out from an
// "array" of nodes similar to {Node1, Node2, Node3} etc. The Mod 10 simply pulls the
// "digit" from the number:
-// Round: 13 -> 13 - 1 = 12 -> 12 Mod 10 -> 2 -> Node3 with nodeIdx == 2
+//
+// Round: 13 -> 13 - 1 = 12 -> 12 Mod 10 -> 2 -> Node3 with nodeIdx == 2
//
// The keys are overlapped in the sense that a key is registered to a node and
// "overlaps" with other installed keys that are also valid. Meaning there might be:
diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go
index 2ff0c08e5..5d14f64d2 100644
--- a/test/e2e-go/restAPI/restClient_test.go
+++ b/test/e2e-go/restAPI/restClient_test.go
@@ -25,6 +25,7 @@ import (
"fmt"
"math"
"math/rand"
+ "net/http"
"os"
"path/filepath"
"sort"
@@ -44,6 +45,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/ledger/simulation"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
@@ -1566,6 +1568,94 @@ end:
assertBoxCount(numberOfBoxesRemaining)
}
+func TestSimulateTxnTracerDevMode(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ var localFixture fixtures.RestClientFixture
+ localFixture.Setup(t, filepath.Join("nettemplates", "DevModeTxnTracerNetwork.json"))
+ defer localFixture.Shutdown()
+
+ testClient := localFixture.LibGoalClient
+
+ _, err := testClient.WaitForRound(1)
+ a.NoError(err)
+
+ wh, err := testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addresses, err := testClient.ListAddresses(wh)
+ a.NoError(err)
+ senderBalance, senderAddress := getMaxBalAddr(t, testClient, addresses)
+ if senderAddress == "" {
+ t.Error("no addr with funds")
+ }
+ a.NoError(err)
+
+ toAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
+ closeToAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
+
+ // Ensure these accounts don't exist
+ receiverBalance, err := testClient.GetBalance(toAddress)
+ a.NoError(err)
+ a.Zero(receiverBalance)
+ closeToBalance, err := testClient.GetBalance(closeToAddress)
+ a.NoError(err)
+ a.Zero(closeToBalance)
+
+ txn, err := testClient.ConstructPayment(senderAddress, toAddress, 0, senderBalance/2, nil, closeToAddress, [32]byte{}, 0, 0)
+ a.NoError(err)
+ stxn, err := testClient.SignTransactionWithWallet(wh, nil, txn)
+ a.NoError(err)
+
+ currentRoundBeforeSimulate, err := testClient.CurrentRound()
+ a.NoError(err)
+
+ simulateRequest := v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {
+ Txns: []transactions.SignedTxn{stxn},
+ },
+ },
+ }
+ result, err := testClient.SimulateTransactions(simulateRequest)
+ a.NoError(err)
+
+ currentAfterAfterSimulate, err := testClient.CurrentRound()
+ a.NoError(err)
+
+ // We can assert equality here since DevMode rounds are controlled by txn sends.
+ a.Equal(result.LastRound, currentRoundBeforeSimulate)
+ a.Equal(result.LastRound, currentAfterAfterSimulate)
+
+ closingAmount := senderBalance - txn.Fee.Raw - txn.Amount.Raw
+ expectedResult := v2.PreEncodedSimulateResponse{
+ Version: 2,
+ LastRound: result.LastRound, // checked above
+ TxnGroups: []v2.PreEncodedSimulateTxnGroupResult{
+ {
+ Txns: []v2.PreEncodedSimulateTxnResult{
+ {
+ Txn: v2.PreEncodedTxInfo{
+ Txn: stxn,
+ ClosingAmount: &closingAmount,
+ },
+ },
+ },
+ },
+ },
+ }
+ a.Equal(expectedResult, result)
+
+ // Ensure the transaction did not actually get applied to the ledger
+ receiverBalance, err = testClient.GetBalance(toAddress)
+ a.NoError(err)
+ a.Zero(receiverBalance)
+ closeToBalance, err = testClient.GetBalance(closeToAddress)
+ a.NoError(err)
+ a.Zero(closeToBalance)
+}
+
func TestSimulateTransaction(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -1738,17 +1828,6 @@ func TestSimulateWithUnlimitedLog(t *testing.T) {
}
a.NoError(err)
- toAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
- closeToAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
-
- // Ensure these accounts don't exist
- receiverBalance, err := testClient.GetBalance(toAddress)
- a.NoError(err)
- a.Zero(receiverBalance)
- closeToBalance, err := testClient.GetBalance(closeToAddress)
- a.NoError(err)
- a.Zero(closeToBalance)
-
// construct program that uses a lot of log
prog := `#pragma version 8
txn NumAppArgs
@@ -1785,12 +1864,10 @@ int 1`
// sign and broadcast
appCreateTxID, err := testClient.SignAndBroadcastTransaction(wh, nil, appCreateTxn)
a.NoError(err)
- _, err = waitForTransaction(t, testClient, senderAddress, appCreateTxID, 30*time.Second)
+ submittedAppCreateTxn, err := waitForTransaction(t, testClient, senderAddress, appCreateTxID, 30*time.Second)
a.NoError(err)
// get app ID
- submittedAppCreateTxn, err := testClient.PendingTransactionInformation(appCreateTxID)
- a.NoError(err)
a.NotNil(submittedAppCreateTxn.ApplicationIndex)
createdAppID := basics.AppIndex(*submittedAppCreateTxn.ApplicationIndex)
a.Greater(uint64(createdAppID), uint64(0))
@@ -1884,17 +1961,6 @@ func TestSimulateWithExtraBudget(t *testing.T) {
}
a.NoError(err)
- toAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
- closeToAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
-
- // Ensure these accounts don't exist
- receiverBalance, err := testClient.GetBalance(toAddress)
- a.NoError(err)
- a.Zero(receiverBalance)
- closeToBalance, err := testClient.GetBalance(closeToAddress)
- a.NoError(err)
- a.Zero(closeToBalance)
-
// construct program that uses a lot of budget
prog := `#pragma version 8
txn ApplicationID
@@ -1926,12 +1992,10 @@ int 1`
// sign and broadcast
appCreateTxID, err := testClient.SignAndBroadcastTransaction(wh, nil, appCreateTxn)
a.NoError(err)
- _, err = waitForTransaction(t, testClient, senderAddress, appCreateTxID, 30*time.Second)
+ submittedAppCreateTxn, err := waitForTransaction(t, testClient, senderAddress, appCreateTxID, 30*time.Second)
a.NoError(err)
// get app ID
- submittedAppCreateTxn, err := testClient.PendingTransactionInformation(appCreateTxID)
- a.NoError(err)
a.NotNil(submittedAppCreateTxn.ApplicationIndex)
createdAppID := basics.AppIndex(*submittedAppCreateTxn.ApplicationIndex)
a.Greater(uint64(createdAppID), uint64(0))
@@ -1988,3 +2052,1153 @@ int 1`
}
a.Equal(expectedResult, resp)
}
+
+func toPtr[T any](constVar T) *T { return &constVar }
+
+func valToNil[T comparable](v *T) *T {
+ var defaultV T
+ if v == nil || *v == defaultV {
+ return nil
+ }
+ return v
+}
+
+// The program is copied from pyteal source for c2c test over betanet:
+// source: https://github.com/ahangsu/c2c-testscript/blob/master/c2c_test/max_depth/app.py
+const maxDepthTealApproval = `#pragma version 8
+txn ApplicationID
+int 0
+==
+bnz main_l6
+txn NumAppArgs
+int 1
+==
+bnz main_l3
+err
+main_l3:
+global CurrentApplicationID
+app_params_get AppApprovalProgram
+store 1
+store 0
+global CurrentApplicationID
+app_params_get AppClearStateProgram
+store 3
+store 2
+global CurrentApplicationAddress
+acct_params_get AcctBalance
+store 5
+store 4
+load 1
+assert
+load 3
+assert
+load 5
+assert
+int 2
+txna ApplicationArgs 0
+btoi
+exp
+itob
+log
+txna ApplicationArgs 0
+btoi
+int 0
+>
+bnz main_l5
+main_l4:
+int 1
+return
+main_l5:
+itxn_begin
+ int appl
+ itxn_field TypeEnum
+ int 0
+ itxn_field Fee
+ load 0
+ itxn_field ApprovalProgram
+ load 2
+ itxn_field ClearStateProgram
+itxn_submit
+itxn_begin
+ int pay
+ itxn_field TypeEnum
+ int 0
+ itxn_field Fee
+ load 4
+ int 100000
+ -
+ itxn_field Amount
+ byte "appID"
+ gitxn 0 CreatedApplicationID
+ itob
+ concat
+ sha512_256
+ itxn_field Receiver
+itxn_next
+ int appl
+ itxn_field TypeEnum
+ txna ApplicationArgs 0
+ btoi
+ int 1
+ -
+ itob
+ itxn_field ApplicationArgs
+ itxn CreatedApplicationID
+ itxn_field ApplicationID
+ int 0
+ itxn_field Fee
+ int DeleteApplication
+ itxn_field OnCompletion
+itxn_submit
+b main_l4
+main_l6:
+int 1
+return`
+
+func goValuesToAvmValues(goValues ...interface{}) *[]model.AvmValue {
+ if len(goValues) == 0 {
+ return nil
+ }
+
+ boolToUint64 := func(b bool) uint64 {
+ if b {
+ return 1
+ }
+ return 0
+ }
+
+ modelValues := make([]model.AvmValue, len(goValues))
+ for i, goValue := range goValues {
+ switch converted := goValue.(type) {
+ case []byte:
+ modelValues[i] = model.AvmValue{
+ Type: uint64(basics.TealBytesType),
+ Bytes: &converted,
+ }
+ case bool:
+ convertedUint := boolToUint64(converted)
+ modelValues[i] = model.AvmValue{
+ Type: uint64(basics.TealUintType),
+ Uint: valToNil(&convertedUint),
+ }
+ case int:
+ convertedUint := uint64(converted)
+ modelValues[i] = model.AvmValue{
+ Type: uint64(basics.TealUintType),
+ Uint: valToNil(&convertedUint),
+ }
+ case basics.AppIndex:
+ convertedUint := uint64(converted)
+ modelValues[i] = model.AvmValue{
+ Type: uint64(basics.TealUintType),
+ Uint: valToNil(&convertedUint),
+ }
+ case uint64:
+ modelValues[i] = model.AvmValue{
+ Type: uint64(basics.TealUintType),
+ Uint: valToNil(&converted),
+ }
+ default:
+ panic("unexpected type inferred from interface{}")
+ }
+ }
+ return &modelValues
+}
+
+func TestMaxDepthAppWithPCandStackTrace(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ var localFixture fixtures.RestClientFixture
+ localFixture.SetupNoStart(t, filepath.Join("nettemplates", "OneNodeFuture.json"))
+
+ // Get primary node
+ primaryNode, err := fixture.GetNodeController("Primary")
+ a.NoError(err)
+
+ fixture.Start()
+ defer primaryNode.FullStop()
+
+ // get lib goal client
+ testClient := fixture.LibGoalFixture.GetLibGoalClientFromNodeController(primaryNode)
+
+ _, err = testClient.WaitForRound(1)
+ a.NoError(err)
+
+ wh, err := testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addresses, err := testClient.ListAddresses(wh)
+ a.NoError(err)
+ _, senderAddress := getMaxBalAddr(t, testClient, addresses)
+ a.NotEmpty(senderAddress, "no addr with funds")
+ a.NoError(err)
+
+ ops, err := logic.AssembleString(maxDepthTealApproval)
+ a.NoError(err)
+ approval := ops.Program
+ ops, err = logic.AssembleString("#pragma version 8\nint 1")
+ a.NoError(err)
+ clearState := ops.Program
+
+ gl := basics.StateSchema{}
+ lc := basics.StateSchema{}
+
+ MaxDepth := 2
+ MinFee := config.Consensus[protocol.ConsensusFuture].MinTxnFee
+ MinBalance := config.Consensus[protocol.ConsensusFuture].MinBalance
+
+ // create app and get the application ID
+ appCreateTxn, err := testClient.MakeUnsignedAppCreateTx(
+ transactions.NoOpOC, approval, clearState, gl,
+ lc, nil, nil, nil, nil, nil, 0)
+ a.NoError(err)
+ appCreateTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, 0, appCreateTxn)
+ a.NoError(err)
+
+ appCreateTxID, err := testClient.SignAndBroadcastTransaction(wh, nil, appCreateTxn)
+ a.NoError(err)
+ submittedAppCreateTxn, err := waitForTransaction(t, testClient, senderAddress, appCreateTxID, 30*time.Second)
+ a.NoError(err)
+ futureAppID := basics.AppIndex(*submittedAppCreateTxn.ApplicationIndex)
+
+ // fund app account
+ appFundTxn, err := testClient.SendPaymentFromWallet(
+ wh, nil, senderAddress, futureAppID.Address().String(),
+ 0, MinBalance*uint64(MaxDepth+1), nil, "", 0, 0,
+ )
+ a.NoError(err)
+
+ uint64ToBytes := func(v uint64) []byte {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, v)
+ return b
+ }
+
+ // construct app calls
+ appCallTxn, err := testClient.MakeUnsignedAppNoOpTx(
+ uint64(futureAppID), [][]byte{uint64ToBytes(uint64(MaxDepth))}, nil, nil, nil, nil,
+ )
+ a.NoError(err)
+ appCallTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, MinFee*uint64(3*MaxDepth+2), appCallTxn)
+ a.NoError(err)
+
+ // Group the transactions, and start the simulation
+ gid, err := testClient.GroupID([]transactions.Transaction{appFundTxn, appCallTxn})
+ a.NoError(err)
+ appFundTxn.Group = gid
+ appCallTxn.Group = gid
+
+ appFundTxnSigned, err := testClient.SignTransactionWithWallet(wh, nil, appFundTxn)
+ a.NoError(err)
+ appCallTxnSigned, err := testClient.SignTransactionWithWallet(wh, nil, appCallTxn)
+ a.NoError(err)
+
+ // The first simulation should not pass, for simulation return PC in config has not been activated
+ execTraceConfig := simulation.ExecTraceConfig{
+ Enable: true,
+ Stack: true,
+ }
+ simulateRequest := v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {Txns: []transactions.SignedTxn{appFundTxnSigned, appCallTxnSigned}},
+ },
+ ExecTraceConfig: execTraceConfig,
+ }
+
+ _, err = testClient.SimulateTransactions(simulateRequest)
+ var httpError client.HTTPError
+ a.ErrorAs(err, &httpError)
+ a.Equal(http.StatusBadRequest, httpError.StatusCode)
+ a.Contains(httpError.ErrorString, "the local configuration of the node has `EnableDeveloperAPI` turned off, while requesting for execution trace")
+
+ // update the configuration file to enable EnableDeveloperAPI
+ err = primaryNode.FullStop()
+ a.NoError(err)
+ cfg, err := config.LoadConfigFromDisk(primaryNode.GetDataDir())
+ a.NoError(err)
+ cfg.EnableDeveloperAPI = true
+ err = cfg.SaveToDisk(primaryNode.GetDataDir())
+ require.NoError(t, err)
+ fixture.Start()
+
+ resp, err := testClient.SimulateTransactions(simulateRequest)
+ a.NoError(err)
+
+ // Check expected == actual
+ creationOpcodeTrace := []model.SimulationOpcodeTraceUnit{
+ {
+ Pc: 1,
+ },
+ // txn ApplicationID
+ {
+ Pc: 6,
+ StackAdditions: goValuesToAvmValues(0),
+ },
+ // int 0
+ {
+ Pc: 8,
+ StackAdditions: goValuesToAvmValues(0),
+ },
+ // ==
+ {
+ Pc: 9,
+ StackPopCount: toPtr[uint64](2),
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // bnz main_l6
+ {
+ Pc: 10,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // int 1
+ {
+ Pc: 149,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // return
+ {
+ Pc: 150,
+ StackAdditions: goValuesToAvmValues(1),
+ StackPopCount: toPtr[uint64](1),
+ },
+ }
+
+ const NumArgs = 1
+
+ recursiveLongOpcodeTrace := func(appID basics.AppIndex, layer int) *[]model.SimulationOpcodeTraceUnit {
+ return &[]model.SimulationOpcodeTraceUnit{
+ {
+ Pc: 1,
+ },
+ // txn ApplicationID
+ {
+ Pc: 6,
+ StackAdditions: goValuesToAvmValues(appID),
+ },
+ // int 0
+ {
+ Pc: 8,
+ StackAdditions: goValuesToAvmValues(0),
+ },
+ // ==
+ {
+ Pc: 9,
+ StackAdditions: goValuesToAvmValues(false),
+ StackPopCount: toPtr[uint64](2),
+ },
+ // bnz main_l6
+ {
+ Pc: 10,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // txn NumAppArgs
+ {
+ Pc: 13,
+ StackAdditions: goValuesToAvmValues(NumArgs),
+ },
+ // int 1
+ {
+ Pc: 15,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // ==
+ {
+ Pc: 16,
+ StackPopCount: toPtr[uint64](2),
+ StackAdditions: goValuesToAvmValues(true),
+ },
+ // bnz main_l3
+ {
+ Pc: 17,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // global CurrentApplicationID
+ {
+ Pc: 21,
+ StackAdditions: goValuesToAvmValues(appID),
+ },
+ // app_params_get AppApprovalProgram
+ {
+ Pc: 23,
+ StackAdditions: goValuesToAvmValues(approval, 1),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 1
+ {
+ Pc: 25,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 0
+ {
+ Pc: 27,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // global CurrentApplicationID
+ {
+ Pc: 29,
+ StackAdditions: goValuesToAvmValues(appID),
+ },
+ // app_params_get AppClearStateProgram
+ {
+ Pc: 31,
+ StackAdditions: goValuesToAvmValues(clearState, 1),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 3
+ {
+ Pc: 33,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 2
+ {
+ Pc: 35,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // global CurrentApplicationAddress
+ {
+ Pc: 37,
+ StackAdditions: goValuesToAvmValues(crypto.Digest(appID.Address()).ToSlice()),
+ },
+ // acct_params_get AcctBalance
+ {
+ Pc: 39,
+ StackAdditions: goValuesToAvmValues(uint64(3-layer)*MinBalance, 1),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 5
+ {
+ Pc: 41,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 4
+ {
+ Pc: 43,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // load 1
+ {
+ Pc: 45,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // assert
+ {
+ Pc: 47,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // load 3
+ {
+ Pc: 48,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // assert
+ {
+ Pc: 50,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // load 5
+ {
+ Pc: 51,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // assert
+ {
+ Pc: 53,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // int 2
+ {
+ Pc: 54,
+ StackAdditions: goValuesToAvmValues(2),
+ },
+ // txna ApplicationArgs 0
+ {
+ Pc: 56,
+ StackAdditions: goValuesToAvmValues(uint64ToBytes(uint64(MaxDepth - layer))),
+ },
+ // btoi
+ {
+ Pc: 59,
+ StackAdditions: goValuesToAvmValues(uint64(MaxDepth - layer)),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // exp
+ {
+ Pc: 60,
+ StackAdditions: goValuesToAvmValues(1 << (MaxDepth - layer)),
+ StackPopCount: toPtr[uint64](2),
+ },
+ // itob
+ {
+ Pc: 61,
+ StackAdditions: goValuesToAvmValues(uint64ToBytes(1 << uint64(MaxDepth-layer))),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // log
+ {
+ Pc: 62,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // txna ApplicationArgs 0
+ {
+ Pc: 63,
+ StackAdditions: goValuesToAvmValues(uint64ToBytes(uint64(MaxDepth - layer))),
+ },
+ // btoi
+ {
+ Pc: 66,
+ StackAdditions: goValuesToAvmValues(MaxDepth - layer),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // int 0
+ {
+ Pc: 67,
+ StackAdditions: goValuesToAvmValues(0),
+ },
+ // >
+ {
+ Pc: 68,
+ StackAdditions: goValuesToAvmValues(MaxDepth-layer > 0),
+ StackPopCount: toPtr[uint64](2),
+ },
+ // bnz main_l5
+ {
+ Pc: 69,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // itxn_begin
+ {
+ Pc: 74,
+ },
+ // int appl
+ {
+ Pc: 75,
+ StackAdditions: goValuesToAvmValues(6),
+ },
+ // itxn_field TypeEnum
+ {
+ Pc: 76,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // int 0
+ {
+ Pc: 78,
+ StackAdditions: goValuesToAvmValues(0),
+ },
+ // itxn_field Fee
+ {
+ Pc: 79,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // load 0
+ {
+ Pc: 81,
+ StackAdditions: goValuesToAvmValues(approval),
+ },
+ // itxn_field ApprovalProgram
+ {
+ Pc: 83,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // load 2
+ {
+ Pc: 85,
+ StackAdditions: goValuesToAvmValues(clearState),
+ },
+ // itxn_field ClearStateProgram
+ {
+ Pc: 87,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // itxn_submit
+ {
+ Pc: 89,
+ SpawnedInners: &[]uint64{0},
+ },
+ // itxn_begin
+ {
+ Pc: 90,
+ },
+ // int pay
+ {
+ Pc: 91,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // itxn_field TypeEnum
+ {
+ Pc: 92,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // int 0
+ {
+ Pc: 94,
+ StackAdditions: goValuesToAvmValues(0),
+ },
+ // itxn_field Fee
+ {
+ Pc: 95,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // load 4
+ {
+ Pc: 97,
+ StackAdditions: goValuesToAvmValues(uint64(3-layer) * MinBalance),
+ },
+ // int 100000
+ {
+ Pc: 99,
+ StackAdditions: goValuesToAvmValues(MinBalance),
+ },
+ // -
+ {
+ Pc: 103,
+ StackPopCount: toPtr[uint64](2),
+ StackAdditions: goValuesToAvmValues(uint64(2-layer) * MinBalance),
+ },
+ // itxn_field Amount
+ {
+ Pc: 104,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // byte "appID"
+ {
+ Pc: 106,
+ StackAdditions: goValuesToAvmValues([]byte("appID")),
+ },
+ // gitxn 0 CreatedApplicationID
+ {
+ Pc: 113,
+ StackAdditions: goValuesToAvmValues(appID + 3),
+ },
+ // itob
+ {
+ Pc: 116,
+ StackAdditions: goValuesToAvmValues(uint64ToBytes(uint64(appID) + 3)),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // concat
+ {
+ Pc: 117,
+ StackAdditions: goValuesToAvmValues([]byte("appID" + string(uint64ToBytes(uint64(appID)+3)))),
+ StackPopCount: toPtr[uint64](2),
+ },
+ // sha512_256
+ {
+ Pc: 118,
+ StackAdditions: goValuesToAvmValues(crypto.Digest(basics.AppIndex(uint64(appID) + 3).Address()).ToSlice()),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // itxn_field Receiver
+ {
+ Pc: 119,
+ StackPopCount: toPtr[uint64](1),
+ },
+ {
+ Pc: 121,
+ },
+ // int appl
+ {
+ Pc: 122,
+ StackAdditions: goValuesToAvmValues(6),
+ },
+ // itxn_field TypeEnum
+ {
+ Pc: 123,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // txna ApplicationArgs 0
+ {
+ Pc: 125,
+ StackAdditions: goValuesToAvmValues(uint64ToBytes(uint64(MaxDepth - layer))),
+ },
+ // btoi
+ {
+ Pc: 128,
+ StackAdditions: goValuesToAvmValues(MaxDepth - layer),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // int 1
+ {
+ Pc: 129,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // -
+ {
+ Pc: 130,
+ StackAdditions: goValuesToAvmValues(MaxDepth - layer - 1),
+ StackPopCount: toPtr[uint64](2),
+ },
+ // itob
+ {
+ Pc: 131,
+ StackAdditions: goValuesToAvmValues(uint64ToBytes(uint64(MaxDepth - layer - 1))),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // itxn_field ApplicationArgs
+ {
+ Pc: 132,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // itxn CreatedApplicationID
+ {
+ Pc: 134,
+ StackAdditions: goValuesToAvmValues(appID + 3),
+ },
+ // itxn_field ApplicationID
+ {
+ Pc: 136,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // int 0
+ {
+ Pc: 138,
+ StackAdditions: goValuesToAvmValues(0),
+ },
+ // itxn_field Fee
+ {
+ Pc: 139,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // int DeleteApplication
+ {
+ Pc: 141,
+ StackAdditions: goValuesToAvmValues(5),
+ },
+ // itxn_field OnCompletion
+ {
+ Pc: 143,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // itxn_submit
+ {
+ Pc: 145,
+ SpawnedInners: &[]uint64{1, 2},
+ },
+ // b main_l4
+ {
+ Pc: 146,
+ },
+ // int 1
+ {
+ Pc: 72,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // return
+ {
+ Pc: 73,
+ StackAdditions: goValuesToAvmValues(1),
+ StackPopCount: toPtr[uint64](1),
+ },
+ }
+ }
+
+ finalDepthTrace := func(appID basics.AppIndex, layer int) *[]model.SimulationOpcodeTraceUnit {
+ return &[]model.SimulationOpcodeTraceUnit{
+ {
+ Pc: 1,
+ },
+ // txn ApplicationID
+ {
+ Pc: 6,
+ StackAdditions: goValuesToAvmValues(appID),
+ },
+ // int 0
+ {
+ Pc: 8,
+ StackAdditions: goValuesToAvmValues(0),
+ },
+ // ==
+ {
+ Pc: 9,
+ StackAdditions: goValuesToAvmValues(false),
+ StackPopCount: toPtr[uint64](2),
+ },
+ // bnz main_l6
+ {
+ Pc: 10,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // txn NumAppArgs
+ {
+ Pc: 13,
+ StackAdditions: goValuesToAvmValues(NumArgs),
+ },
+ // int 1
+ {
+ Pc: 15,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // ==
+ {
+ Pc: 16,
+ StackPopCount: toPtr[uint64](2),
+ StackAdditions: goValuesToAvmValues(true),
+ },
+ // bnz main_l3
+ {
+ Pc: 17,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // global CurrentApplicationID
+ {
+ Pc: 21,
+ StackAdditions: goValuesToAvmValues(appID),
+ },
+ // app_params_get AppApprovalProgram
+ {
+ Pc: 23,
+ StackAdditions: goValuesToAvmValues(approval, 1),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 1
+ {
+ Pc: 25,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 0
+ {
+ Pc: 27,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // global CurrentApplicationID
+ {
+ Pc: 29,
+ StackAdditions: goValuesToAvmValues(appID),
+ },
+ // app_params_get AppClearStateProgram
+ {
+ Pc: 31,
+ StackAdditions: goValuesToAvmValues(clearState, 1),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 3
+ {
+ Pc: 33,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 2
+ {
+ Pc: 35,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // global CurrentApplicationAddress
+ {
+ Pc: 37,
+ StackAdditions: goValuesToAvmValues(crypto.Digest(appID.Address()).ToSlice()),
+ },
+ // acct_params_get AcctBalance
+ {
+ Pc: 39,
+ StackAdditions: goValuesToAvmValues(uint64(3-layer)*MinBalance, 1),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 5
+ {
+ Pc: 41,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // store 4
+ {
+ Pc: 43,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // load 1
+ {
+ Pc: 45,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // assert
+ {
+ Pc: 47,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // load 3
+ {
+ Pc: 48,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // assert
+ {
+ Pc: 50,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // load 5
+ {
+ Pc: 51,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // assert
+ {
+ Pc: 53,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // int 2
+ {
+ Pc: 54,
+ StackAdditions: goValuesToAvmValues(2),
+ },
+ // txna ApplicationArgs 0
+ {
+ Pc: 56,
+ StackAdditions: goValuesToAvmValues(uint64ToBytes(uint64(MaxDepth - layer))),
+ },
+ // btoi
+ {
+ Pc: 59,
+ StackAdditions: goValuesToAvmValues(uint64(MaxDepth - layer)),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // exp
+ {
+ Pc: 60,
+ StackAdditions: goValuesToAvmValues(1 << (MaxDepth - layer)),
+ StackPopCount: toPtr[uint64](2),
+ },
+ // itob
+ {
+ Pc: 61,
+ StackAdditions: goValuesToAvmValues(uint64ToBytes(1 << uint64(MaxDepth-layer))),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // log
+ {
+ Pc: 62,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // txna ApplicationArgs 0
+ {
+ Pc: 63,
+ StackAdditions: goValuesToAvmValues(uint64ToBytes(uint64(MaxDepth - layer))),
+ },
+ // btoi
+ {
+ Pc: 66,
+ StackAdditions: goValuesToAvmValues(MaxDepth - layer),
+ StackPopCount: toPtr[uint64](1),
+ },
+ // int 0
+ {
+ Pc: 67,
+ StackAdditions: goValuesToAvmValues(0),
+ },
+ // >
+ {
+ Pc: 68,
+ StackAdditions: goValuesToAvmValues(MaxDepth-layer > 0),
+ StackPopCount: toPtr[uint64](2),
+ },
+ // bnz main_l5
+ {
+ Pc: 69,
+ StackPopCount: toPtr[uint64](1),
+ },
+ // int 1
+ {
+ Pc: 72,
+ StackAdditions: goValuesToAvmValues(1),
+ },
+ // return
+ {
+ Pc: 73,
+ StackAdditions: goValuesToAvmValues(1),
+ StackPopCount: toPtr[uint64](1),
+ },
+ }
+ }
+
+ a.Len(resp.TxnGroups[0].Txns, 2)
+ a.Nil(resp.TxnGroups[0].FailureMessage)
+ a.Nil(resp.TxnGroups[0].FailedAt)
+
+ a.Nil(resp.TxnGroups[0].Txns[0].TransactionTrace)
+
+ expectedTraceSecondTxn := &model.SimulationTransactionExecTrace{
+ ApprovalProgramTrace: recursiveLongOpcodeTrace(futureAppID, 0),
+ InnerTrace: &[]model.SimulationTransactionExecTrace{
+ {ApprovalProgramTrace: &creationOpcodeTrace},
+ {},
+ {
+ ApprovalProgramTrace: recursiveLongOpcodeTrace(futureAppID+3, 1),
+ InnerTrace: &[]model.SimulationTransactionExecTrace{
+ {ApprovalProgramTrace: &creationOpcodeTrace},
+ {},
+ {ApprovalProgramTrace: finalDepthTrace(futureAppID+6, 2)},
+ },
+ },
+ },
+ }
+ a.Equal(expectedTraceSecondTxn, resp.TxnGroups[0].Txns[1].TransactionTrace)
+
+ a.Equal(execTraceConfig, resp.ExecTraceConfig)
+}
+
+func TestSimulateScratchSlotChange(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ var localFixture fixtures.RestClientFixture
+ localFixture.SetupNoStart(t, filepath.Join("nettemplates", "OneNodeFuture.json"))
+
+ // Get primary node
+ primaryNode, err := fixture.GetNodeController("Primary")
+ a.NoError(err)
+
+ fixture.Start()
+ defer primaryNode.FullStop()
+
+ // get lib goal client
+ testClient := fixture.LibGoalFixture.GetLibGoalClientFromNodeController(primaryNode)
+
+ _, err = testClient.WaitForRound(1)
+ a.NoError(err)
+
+ wh, err := testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addresses, err := testClient.ListAddresses(wh)
+ a.NoError(err)
+ _, senderAddress := getMaxBalAddr(t, testClient, addresses)
+ a.NotEmpty(senderAddress, "no addr with funds")
+ a.NoError(err)
+
+ ops, err := logic.AssembleString(
+ `#pragma version 8
+ global CurrentApplicationID
+ bz end
+ int 1
+ store 1
+ load 1
+ dup
+ stores
+ end:
+ int 1`)
+ a.NoError(err)
+ approval := ops.Program
+ ops, err = logic.AssembleString("#pragma version 8\nint 1")
+ a.NoError(err)
+ clearState := ops.Program
+
+ gl := basics.StateSchema{}
+ lc := basics.StateSchema{}
+
+ MinFee := config.Consensus[protocol.ConsensusFuture].MinTxnFee
+ MinBalance := config.Consensus[protocol.ConsensusFuture].MinBalance
+
+ // create app and get the application ID
+ appCreateTxn, err := testClient.MakeUnsignedAppCreateTx(
+ transactions.NoOpOC, approval, clearState, gl,
+ lc, nil, nil, nil, nil, nil, 0)
+ a.NoError(err)
+ appCreateTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, 0, appCreateTxn)
+ a.NoError(err)
+
+ appCreateTxID, err := testClient.SignAndBroadcastTransaction(wh, nil, appCreateTxn)
+ a.NoError(err)
+ submittedAppCreateTxn, err := waitForTransaction(t, testClient, senderAddress, appCreateTxID, 30*time.Second)
+ a.NoError(err)
+ futureAppID := basics.AppIndex(*submittedAppCreateTxn.ApplicationIndex)
+
+ // fund app account
+ appFundTxn, err := testClient.SendPaymentFromWallet(
+ wh, nil, senderAddress, futureAppID.Address().String(),
+ 0, MinBalance, nil, "", 0, 0,
+ )
+ a.NoError(err)
+
+ // construct app calls
+ appCallTxn, err := testClient.MakeUnsignedAppNoOpTx(
+ uint64(futureAppID), [][]byte{}, nil, nil, nil, nil,
+ )
+ a.NoError(err)
+ appCallTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, MinFee, appCallTxn)
+ a.NoError(err)
+
+ // Group the transactions
+ gid, err := testClient.GroupID([]transactions.Transaction{appFundTxn, appCallTxn})
+ a.NoError(err)
+ appFundTxn.Group = gid
+ appCallTxn.Group = gid
+
+ appFundTxnSigned, err := testClient.SignTransactionWithWallet(wh, nil, appFundTxn)
+ a.NoError(err)
+ appCallTxnSigned, err := testClient.SignTransactionWithWallet(wh, nil, appCallTxn)
+ a.NoError(err)
+
+ // construct simulation request, with scratch slot change enabled
+ execTraceConfig := simulation.ExecTraceConfig{
+ Enable: true,
+ Scratch: true,
+ }
+ simulateRequest := v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {Txns: []transactions.SignedTxn{appFundTxnSigned, appCallTxnSigned}},
+ },
+ ExecTraceConfig: execTraceConfig,
+ }
+
+ // update the configuration file to enable EnableDeveloperAPI
+ err = primaryNode.FullStop()
+ a.NoError(err)
+ cfg, err := config.LoadConfigFromDisk(primaryNode.GetDataDir())
+ a.NoError(err)
+ cfg.EnableDeveloperAPI = true
+ err = cfg.SaveToDisk(primaryNode.GetDataDir())
+ require.NoError(t, err)
+ fixture.Start()
+
+ // simulate with wrong config (not enabled trace), see expected error
+ _, err = testClient.SimulateTransactions(v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {Txns: []transactions.SignedTxn{appFundTxnSigned, appCallTxnSigned}},
+ },
+ ExecTraceConfig: simulation.ExecTraceConfig{Scratch: true},
+ })
+ a.ErrorContains(err, "basic trace must be enabled when enabling scratch slot change tracing")
+
+ // start real simulating
+ resp, err := testClient.SimulateTransactions(simulateRequest)
+ a.NoError(err)
+
+ // check if resp match expected result
+ a.Equal(execTraceConfig, resp.ExecTraceConfig)
+ a.Len(resp.TxnGroups[0].Txns, 2)
+ a.Nil(resp.TxnGroups[0].Txns[0].TransactionTrace)
+ a.NotNil(resp.TxnGroups[0].Txns[1].TransactionTrace)
+
+ expectedTraceSecondTxn := &model.SimulationTransactionExecTrace{
+ ApprovalProgramTrace: &[]model.SimulationOpcodeTraceUnit{
+ {Pc: 1},
+ {Pc: 4},
+ {Pc: 6},
+ {Pc: 9},
+ {
+ Pc: 10,
+ ScratchChanges: &[]model.ScratchChange{
+ {
+ Slot: 1,
+ NewValue: model.AvmValue{
+ Type: 2,
+ Uint: toPtr[uint64](1),
+ },
+ },
+ },
+ },
+ {Pc: 12},
+ {Pc: 14},
+ {
+ Pc: 15,
+ ScratchChanges: &[]model.ScratchChange{
+ {
+ Slot: 1,
+ NewValue: model.AvmValue{
+ Type: 2,
+ Uint: toPtr[uint64](1),
+ },
+ },
+ },
+ },
+ {Pc: 16},
+ },
+ }
+ a.Equal(expectedTraceSecondTxn, resp.TxnGroups[0].Txns[1].TransactionTrace)
+}
diff --git a/test/e2e-go/upgrades/stateproof_participation_test.go b/test/e2e-go/upgrades/stateproof_participation_test.go
index d8153dea5..8823d7c76 100644
--- a/test/e2e-go/upgrades/stateproof_participation_test.go
+++ b/test/e2e-go/upgrades/stateproof_participation_test.go
@@ -18,7 +18,6 @@ package upgrades
import (
"path/filepath"
- "strings"
"testing"
"time"
@@ -44,7 +43,7 @@ func waitUntilProtocolUpgrades(a *require.Assertions, fixture *fixtures.RestClie
startTime := time.Now()
// while consensus version has not upgraded
- for strings.Compare(string(curProtocol), string(consensusTestFastUpgrade(protocol.ConsensusV30))) == 0 {
+ for curProtocol == consensusTestFastUpgrade(protocol.ConsensusV30) {
curRound = curRound + 1
fixture.WaitForRoundWithTimeout(curRound + 1)
@@ -144,8 +143,9 @@ func getStateProofConsensus() config.ConsensusProtocols {
return consensus
}
-//TODO: copied code from other test: onlineOfflineParticipation_test.go.
-// consider how to avoid duplication
+// TODO: copied code from other test: onlineOfflineParticipation_test.go.
+//
+// consider how to avoid duplication
func waitForAccountToProposeBlock(a *require.Assertions, fixture *fixtures.RestClientFixture, account string, window int) bool {
client := fixture.AlgodClient
@@ -172,7 +172,8 @@ func waitForAccountToProposeBlock(a *require.Assertions, fixture *fixtures.RestC
}
// This test starts with participation keys in Version30, then attempts to let the richest user participate even after
-// consensus upgrade.
+//
+// consensus upgrade.
func TestParticipationWithoutStateProofKeys(t *testing.T) {
partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
diff --git a/test/heapwatch/client_ram_report.py b/test/heapwatch/client_ram_report.py
index 51f7bcb31..29642faf1 100644
--- a/test/heapwatch/client_ram_report.py
+++ b/test/heapwatch/client_ram_report.py
@@ -47,16 +47,28 @@ multipliers = {
'TB': 1024*1024*1024*1024,
}
-# d = {k:[v,...]}
-def dapp(d, k, v):
+# d = {k: {t: v},...}
+def dapp(d, k, t, v):
l = d.get(k)
if l is None:
- d[k] = [v]
+ d[k] = {t: v}
else:
- l.append(v)
+ l[t] = v
+
+# d = {k: {t: {m: v},...},...}
+def dapp_metric(d, k, t, m, v):
+ l = d.get(k)
+ if l is None:
+ d[k] = {t: {m: v}}
+ else:
+ l2 = l.get(t)
+ if l2 is None:
+ l[t] = {m: v}
+ else:
+ l2[m] = v
def get_heap_inuse_totals(dirpath):
- '''return {"node nickname":[(YYYYmmdd_HHMMSS, bytes), ...], ...}'''
+ '''return {"node nickname": {"YYYYmmdd_HHMMSS": bytes}, ...}'''
cache_mtime = 0
cache_path = os.path.join(dirpath, 'heap_inuse_totals.json')
if os.path.exists(cache_path):
@@ -88,21 +100,50 @@ def get_heap_inuse_totals(dirpath):
logger.error('could not find total in output: %s', text)
raise Exception('could not find total in output of: %s', ' '.join([repr(x) for x in cmd]))
bytesinuse = float(m.group(1)) * multipliers[m.group(2).upper()]
- dapp(bynick, nick, (timestamp, bytesinuse))
+ dapp(bynick, nick, timestamp, bytesinuse)
logger.debug('%s ok, %s %f', path, timestamp, bytesinuse)
logger.debug('%d skipped older than cache', skipcount)
for nick, recs in bynick.items():
old = cached.get(nick)
if old is None:
- cached[nick] = sorted(recs)
+ cached[nick] = recs
else:
- cached[nick] = sorted(old + recs)
+ cached[nick].update(recs)
if cached and bynick:
with open(cache_path, 'wt') as fout:
json.dump(cached, fout)
return cached
+def get_heap_metrics(dirpath):
+ '''return {"node nickname": {"YYYYmmdd_HHMMSS": {"metric": value}, ...}, ...}'''
+ metrics_name_re = re.compile(r'(.*)\.(.*).metrics')
+ bynick = {}
+ for path in glob.glob(os.path.join(dirpath, '*.*.metrics')):
+ fname = os.path.basename(path)
+ m = metrics_name_re.match(fname)
+ if not m:
+ logger.warning('could not parse heap filename: %r', path)
+ continue
+ nick = m.group(1)
+ timestamp = m.group(2)
+ with open(path, 'rt') as fin:
+ for line in fin.readlines():
+ if line.startswith('#'):
+ continue
+ elif line.startswith('algod_go_memory_classes_heap_objects_bytes'):
+ inuse = float(line.split()[1])
+ dapp_metric(bynick, nick, timestamp, 'inuse', inuse)
+ elif line.startswith('algod_go_memory_classes_total_bytes'):
+ total = float(line.split()[1])
+ dapp_metric(bynick, nick, timestamp, 'total', total)
+ elif line.startswith('algod_go_memory_classes_heap_free_bytes'):
+ free = float(line.split()[1])
+ dapp_metric(bynick, nick, timestamp, 'free', free)
+ elif line.startswith('algod_go_memory_classes_heap_released_bytes'):
+ released = float(line.split()[1])
+ dapp_metric(bynick, nick, timestamp, 'released', released)
+ return bynick
def maybe_load_tf_nicks(args):
tf_inventory_path = os.path.join(args.dir, 'terraform-inventory.host')
@@ -121,9 +162,11 @@ def maybe_load_tf_nicks(args):
return ip_to_name
-def hostports_to_nicks(args, hostports):
+def hostports_to_nicks(args, hostports, metrics=None):
ip_to_nick = maybe_load_tf_nicks(args)
if not ip_to_nick:
+ if metrics:
+ return ['{}#{}'.format(hp, m) for hp in hostports for m in metrics]
return hostports
out = []
for hp in hostports:
@@ -138,6 +181,8 @@ def hostports_to_nicks(args, hostports):
if not hit:
hit = hp
out.append(hit)
+ if metrics:
+ return ['{}#{}'.format(hp, m) for hp in hostports for m in metrics]
return out
@@ -154,6 +199,7 @@ def main():
logging.basicConfig(level=logging.INFO)
heap_totals = get_heap_inuse_totals(args.dir)
+ heap_details = get_heap_metrics(args.dir)
if args.csv:
if args.csv == '-':
@@ -162,12 +208,18 @@ def main():
csvf = open(args.csv, 'wt')
writer = csv.writer(csvf)
whens = set()
- for nick, recs in heap_totals.items():
- for ts, n in recs:
+ col_names_target = heap_totals if heap_totals else heap_details
+ for nick, recs in col_names_target.items():
+ # {k: {t: v}}
+ for ts in recs.keys():
whens.add(ts)
whens = sorted(whens)
- nodes = sorted(heap_totals.keys())
- writer.writerow(['when','dt','round'] + hostports_to_nicks(args, nodes))
+ nodes = sorted(col_names_target.keys())
+ metrics = list(heap_details[nodes[0]].values())[0]
+ writer.writerow(
+ ['when','dt','round'] +
+ hostports_to_nicks(args, nodes, metrics=['pprof_inuse_space'] + list(metrics.keys()))
+ )
first = None
for ts in whens:
tv = time.mktime(time.strptime(ts, '%Y%m%d_%H%M%S'))
@@ -179,13 +231,16 @@ def main():
bi = json.load(open(bipath))
rnd = str(bi['block']['rnd'])
except:
- rnd = ''
+ rnd = '0'
row = [ts, tv-first, rnd]
for nick in nodes:
- for rec in heap_totals[nick]:
- if rec[0] == ts:
- row.append(rec[1])
- break
+ # {k: {t: v}}
+ val = heap_totals.get(nick, {}).get(ts)
+ row.append(val if val else 0)
+ vals = heap_details[nick].get(ts)
+ # {k: {t: {m: v}}}
+ if vals:
+ row.extend(vals.values())
writer.writerow(row)
return 0
diff --git a/test/heapwatch/plot_crr_csv.py b/test/heapwatch/plot_crr_csv.py
index f4a46cf85..d546aaff3 100755
--- a/test/heapwatch/plot_crr_csv.py
+++ b/test/heapwatch/plot_crr_csv.py
@@ -6,8 +6,18 @@ import csv
import random
from matplotlib import pyplot as plt
+from matplotlib.ticker import MaxNLocator, FuncFormatter
_meta_cols = {'when', 'dt', 'round'}
+_metrics_cols = {'free', 'inuse', 'released', 'total'}
+
+# see https://matplotlib.org/stable/gallery/lines_bars_and_markers/linestyles.html
+plt_line_styles = [
+ 'solid', 'dotted', 'dashed', 'dashdot',
+ (5, (10, 3)), # long dash with offset
+ (0, (3, 5, 1, 5)), # dashdotted
+ (0, (3, 10, 1, 10, 1, 10)), # loosely dashdotted
+]
def smin(a,b):
if a is None:
@@ -22,6 +32,27 @@ def smax(a,b):
return a
return max(a,b)
+def add_metric(d, k, m, x, y):
+ """d: {k: {m: [(x,y)]}}"""
+ mt = d.get(k)
+ if mt is None:
+ d[k] = {m: [(x,y)]}
+ else:
+ klist = mt.get(m)
+ if klist is None:
+ mt[m] = [(x,y)]
+ else:
+ klist.append((x, y))
+
+
+def format_mem(x, _):
+ if x<0:
+ return ""
+ for unit in ['bytes', 'KB', 'MB', 'GB']:
+ if x < 1024:
+ return "%3.1f %s" % (x, unit)
+ x /= 1024
+
def main():
import argparse
ap = argparse.ArgumentParser()
@@ -36,29 +67,42 @@ def main():
reader = csv.DictReader(fin)
for rec in reader:
xround = int(rec['round'])
+ row_nick = None
for k,v in rec.items():
if k in _meta_cols:
continue
- klist = fvals.get(k)
- if klist is None:
- klist = []
- fvals[k] = klist
v = float(v)
- klist.append((xround, v))
+ parts = k.split('#')
+ if len(parts) == 2:
+ row_nick = parts[0]
+ metric = parts[1]
+ else :
+ print(f"unknown column {k}")
+ row_nick = k
+ metric = k
+ add_metric(fvals, row_nick, metric, xround, v)
+
minv = smin(minv, v)
maxv = smax(maxv, v)
if not fvals:
print(f"{fname} empty")
continue
- print("{} found series {}".format(fname, sorted(fvals.keys())))
+ nodes = sorted(fvals.keys())
+ print("{} found series {}".format(fname, nodes))
fig, ax = plt.subplots()
+ ax.xaxis.set_major_locator(MaxNLocator(integer=True))
+ ax.yaxis.set_major_formatter(FuncFormatter(format_mem))
ax.set_ylabel('bytes')
ax.set_xlabel('round')
ax.set_ylim(minv,maxv)
- for k in sorted(fvals.keys()):
- xy = fvals[k]
- #for k, xy in fvals.items():
- lc = None
+
+ max_val_color = max(map(len, nodes)) * ord('z')
+ for k in nodes:
+        lc = None  # let matplotlib pick a color if there is no standard node-name pattern — probably a single local run
+ if len(nodes) > 1:
+ # if there are multiple nodes choose some color based on the node name
+ s = sum(map(ord, k))
+ lc = (s/max_val_color, s/max_val_color, s/max_val_color)
if k.startswith('r'):
# blueish
lc = (0.3*random.random(), 0.3*random.random(), 0.7+(0.3*random.random()))
@@ -68,7 +112,12 @@ def main():
elif k.startswith('n'):
# reddish
lc = (0.7+(0.3*random.random()), 0.3*random.random(), 0.3*random.random())
- ax.plot([p[0] for p in xy], [p[1] for p in xy], label=k, color=lc)
+
+ metrics = fvals[k]
+ for i, metric in enumerate(metrics.keys()):
+ xy = metrics[metric]
+
+ ax.plot([p[0] for p in xy], [p[1] for p in xy], label=f'{k}/{metric}', color=lc, linestyle=plt_line_styles[i%len(plt_line_styles)])
ax.legend(loc='upper left', ncol=2)
plt.savefig(fname + '.svg', format='svg')
plt.savefig(fname + '.png', format='png')
diff --git a/test/muleCI/mule.yaml b/test/muleCI/mule.yaml
index bee9a48d3..cb75187db 100644
--- a/test/muleCI/mule.yaml
+++ b/test/muleCI/mule.yaml
@@ -40,6 +40,8 @@ agents:
- VERSION=$VERSION
- BUILD_NUMBER=$BUILD_NUMBER
- GOHOSTARCH=amd64
+ - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
+ - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
buildArgs:
- GOLANG_VERSION=`./scripts/get_golang_version.sh`
- ARCH=amd64
diff --git a/test/packages/test_release.sh b/test/packages/test_release.sh
index f8c8f38e6..1a66184eb 100755
--- a/test/packages/test_release.sh
+++ b/test/packages/test_release.sh
@@ -17,9 +17,9 @@ fi
OS_LIST=(
centos:7
quay.io/centos/centos:stream8
- fedora:34
- ubuntu:18.04
+ fedora:38
ubuntu:20.04
+ ubuntu:22.04
)
BUCKET=algorand-builds
diff --git a/test/platform/test_linux_amd64_compatibility.sh b/test/platform/test_linux_amd64_compatibility.sh
index 0c65db433..7e91c8728 100755
--- a/test/platform/test_linux_amd64_compatibility.sh
+++ b/test/platform/test_linux_amd64_compatibility.sh
@@ -9,9 +9,9 @@ END_FG_COLOR=$(tput sgr0 2>/dev/null)
OS_LIST=(
centos:7
quay.io/centos/centos:stream8
- fedora:28
- ubuntu:16.04
- ubuntu:18.04
+ fedora:38
+ ubuntu:20.04
+ ubuntu:22.04
)
FAILED=()
diff --git a/test/reflectionhelpers/helpers.go b/test/reflectionhelpers/helpers.go
index 79c24d5e2..6d228b061 100644
--- a/test/reflectionhelpers/helpers.go
+++ b/test/reflectionhelpers/helpers.go
@@ -20,6 +20,8 @@ import (
"fmt"
"reflect"
"strings"
+
+ "golang.org/x/exp/slices"
)
// TypeSegmentKind is a enum for the types of TypeSegment
@@ -61,9 +63,7 @@ type TypePath []TypeSegment
// Clone creates a deep copy of a TypePath
func (p TypePath) Clone() TypePath {
- cloned := make(TypePath, len(p))
- copy(cloned, p)
- return cloned
+ return slices.Clone(p)
}
// AddMapKey adds a map key segment to a TypePath. The modification is done using append, so this
@@ -177,15 +177,7 @@ func (p TypePath) ResolveValues(base reflect.Value) []reflect.Value {
// Equals returns true if and only if the input TypePath has the exact same segments as this
// TypePath.
func (p TypePath) Equals(other TypePath) bool {
- if len(p) != len(other) {
- return false
- }
- for i := range p {
- if p[i] != other[i] {
- return false
- }
- }
- return true
+ return slices.Equal(p, other)
}
func (p TypePath) String() string {
diff --git a/test/scripts/e2e_client_runner.py b/test/scripts/e2e_client_runner.py
index 7da46e2a8..7b425ef92 100755
--- a/test/scripts/e2e_client_runner.py
+++ b/test/scripts/e2e_client_runner.py
@@ -446,11 +446,25 @@ def main():
retcode = 0
capv = args.version.capitalize()
xrun(['goal', 'network', 'create', '-r', netdir, '-n', 'tbd', '-t', os.path.join(repodir, f'test/testdata/nettemplates/TwoNodes50Each{capv}.json')], timeout=90)
+ nodeDataDir = os.path.join(netdir, 'Node')
+ primaryDataDir = os.path.join(netdir, 'Primary')
+
+ # Set EnableDeveloperAPI to true for both nodes
+ for dataDir in (nodeDataDir, primaryDataDir):
+ configFile = os.path.join(dataDir, 'config.json')
+ with open(configFile, 'r') as f:
+ configOptions = json.load(f)
+
+ configOptions['EnableDeveloperAPI'] = True
+
+ with open(configFile, 'w') as f:
+ json.dump(configOptions, f)
+
xrun(['goal', 'network', 'start', '-r', netdir], timeout=90)
atexit.register(goal_network_stop, netdir, env)
- env['ALGORAND_DATA'] = os.path.join(netdir, 'Node')
- env['ALGORAND_DATA2'] = os.path.join(netdir, 'Primary')
+ env['ALGORAND_DATA'] = nodeDataDir
+ env['ALGORAND_DATA2'] = primaryDataDir
if args.unsafe_scrypt:
create_kmd_config_with_unsafe_scrypt(env['ALGORAND_DATA'])
diff --git a/test/scripts/e2e_subs/e2e-app-simulate.sh b/test/scripts/e2e_subs/e2e-app-simulate.sh
index 7efc7ae3c..0576e8aea 100755
--- a/test/scripts/e2e_subs/e2e-app-simulate.sh
+++ b/test/scripts/e2e_subs/e2e-app-simulate.sh
@@ -384,3 +384,50 @@ if [[ $(echo "$RES" | jq '."txn-groups"[0]."app-budget-consumed"') -ne 804 ]]; t
date '+app-simulate-test FAIL the app call to generated large TEAL should be consuming 804 budget %Y%m%d_%H%M%S'
false
fi
+
+###############################################################
+# WE WANT TO TEST STACK AND SCRATCH TRACE IN SIMULATION WORKS #
+###############################################################
+
+RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${DIR}/tealprogs/stack-scratch.teal" --clear-prog "${TEMPDIR}/simple-v8.teal" --extra-pages 1 2>&1 || true)
+
+EXPSUCCESS='Created app with app index'
+if [[ $RES != *"${EXPSUCCESS}"* ]]; then
+ date '+app-simulate-test FAIL the app creation for generated large TEAL should succeed %Y%m%d_%H%M%S'
+ false
+fi
+
+APPID=$(echo "$RES" | grep Created | awk '{ print $6 }')
+
+${gcmd} app call --app-id $APPID --app-arg "int:10" --from $ACCOUNT 2>&1 -o "${TEMPDIR}/stack-and-scratch.tx"
+${gcmd} clerk sign -i "${TEMPDIR}/stack-and-scratch.tx" -o "${TEMPDIR}/stack-and-scratch.stx"
+RES=$(${gcmd} clerk simulate --trace --stack --scratch -t "${TEMPDIR}/stack-and-scratch.stx")
+
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_FALSE ]]; then
+ date '+app-simulate-test FAIL the app call for stack and scratch trace should pass %Y%m%d_%H%M%S'
+ false
+fi
+
+SCRATCH_STORE_UNIT=$(echo "$RES" | jq '."txn-groups"[0]."txn-results"[0]."exec-trace"."approval-program-trace"[-7]')
+
+if [[ $(echo "$SCRATCH_STORE_UNIT" | jq 'has("scratch-changes")') != $CONST_TRUE ]]; then
+        data '+app-simulate-test FAIL the app call for stack and scratch trace should return scratch changes at this unit %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$SCRATCH_STORE_UNIT" | jq '."scratch-changes" | length') != 1 ]]; then
+        data '+app-simulate-test FAIL the app call for stack and scratch trace should return scratch changes with length 1 at this unit %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$SCRATCH_STORE_UNIT" | jq 'has("stack-pop-count")') != $CONST_TRUE ]]; then
+        data '+app-simulate-test FAIL the app call for stack and scratch trace should return stack pop count at this unit %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$SCRATCH_STORE_UNIT" | jq '."stack-pop-count"') != 1 ]]; then
+        data '+app-simulate-test FAIL the app call for stack and scratch trace should return stack pop count being 1 at this unit %Y%m%d_%H%M%S'
+ false
+fi
+
+# WE DON'T TEST IN DETAILS ABOUT SCRATCH AND TRACE IN E2E SCRIPT TESTS, SEE RESTCLIENT TEST FOR DETAILS
diff --git a/test/scripts/e2e_subs/goal-account-asset.sh b/test/scripts/e2e_subs/goal-account-asset.sh
new file mode 100755
index 000000000..9df881ca5
--- /dev/null
+++ b/test/scripts/e2e_subs/goal-account-asset.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+date '+goal-account-asset-test start %Y%m%d_%H%M%S'
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNTA=$(${gcmd} account list|awk '{ print $3 }')
+ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
+
+ASSET_INDEX_PATTERN='Created asset with asset index [[:digit:]]+'
+
+# fund account B a bit
+${gcmd} clerk send -a 100000000 -f ${ACCOUNTA} -t ${ACCOUNTB}
+
+# create all assets
+RES=$(${gcmd} asset create --name "asset-a" --creator ${ACCOUNTA} --total 100 --no-clawback --no-freezer --manager ${ACCOUNTA} --no-reserve --signer ${ACCOUNTA})
+ASSET_A_ID=$(echo ${RES} | grep -Eo "${ASSET_INDEX_PATTERN}" | grep -Eo '[[:digit:]]+')
+
+RES=$(${gcmd} asset create --name "asset-b" --creator ${ACCOUNTA} --total 200 --no-clawback --no-freezer --manager ${ACCOUNTA} --no-reserve --signer ${ACCOUNTA})
+ASSET_B_ID=$(echo ${RES} | grep -Eo "${ASSET_INDEX_PATTERN}" | grep -Eo '[[:digit:]]+')
+
+RES=$(${gcmd} asset create --name "asset-c" --creator ${ACCOUNTA} --total 300 --no-clawback --no-freezer --manager ${ACCOUNTA} --no-reserve --signer ${ACCOUNTA})
+ASSET_C_ID=$(echo ${RES} | grep -Eo "${ASSET_INDEX_PATTERN}" | grep -Eo '[[:digit:]]+')
+
+RES=$(${gcmd} asset create --name "asset-d" --creator ${ACCOUNTA} --total 400 --no-clawback --no-freezer --manager ${ACCOUNTA} --no-reserve --signer ${ACCOUNTA})
+ASSET_D_ID=$(echo ${RES} | grep -Eo "${ASSET_INDEX_PATTERN}" | grep -Eo '[[:digit:]]+')
+
+# opt in all assets
+${gcmd} asset optin --account ${ACCOUNTB} --assetid ${ASSET_A_ID} --signer ${ACCOUNTB}
+${gcmd} asset optin --account ${ACCOUNTB} --assetid ${ASSET_B_ID} --signer ${ACCOUNTB}
+${gcmd} asset optin --account ${ACCOUNTB} --assetid ${ASSET_C_ID} --signer ${ACCOUNTB}
+${gcmd} asset optin --account ${ACCOUNTB} --assetid ${ASSET_D_ID} --signer ${ACCOUNTB}
+
+# displays held assets
+${gcmd} account info -a ${ACCOUNTB}
+
+# delete one of the asset
+${gcmd} asset destroy --assetid ${ASSET_B_ID} --creator ${ACCOUNTA} --signer ${ACCOUNTA}
+
+# check account info display
+RES=$(${gcmd} account info -a ${ACCOUNTB})
+
+# check result
+EXPECTED="ID ${ASSET_B_ID}, <deleted/unknown asset>"
+
+if [[ ${RES} != *"${EXPECTED}"* ]]; then
+ date '+goal-account-asset-test should list account info with deleted asset expected line %Y%m%d_%H%M%S'
+ false
+fi
diff --git a/test/scripts/e2e_subs/rawsend.sh b/test/scripts/e2e_subs/rawsend.sh
new file mode 100755
index 000000000..64f9d372b
--- /dev/null
+++ b/test/scripts/e2e_subs/rawsend.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+date '+rawsend-test start %Y%m%d_%H%M%S'
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNTA=$(${gcmd} account list|awk '{ print $3 }')
+ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
+
+# prepare the signed txn for rawsend
+${gcmd} clerk send -a 100000000 -f ${ACCOUNTA} -t ${ACCOUNTB} -o ${TEMPDIR}/send-from-a-to-b.txn
+${gcmd} clerk sign -i ${TEMPDIR}/send-from-a-to-b.txn -o ${TEMPDIR}/send-from-a-to-b.stxn
+
+# rawsend should go through
+RES=$(${gcmd} clerk rawsend -f ${TEMPDIR}/send-from-a-to-b.stxn 2>&1 || true)
+EXPERROR='rejected'
+if [[ $RES == *"${EXPERROR}"* ]]; then
+ date '+rawsend-test sending raw signed payment txn should not be rejected %Y%m%d_%H%M%S'
+ false
+fi
+
+# pending-round info matched from the log; the rounds should always appear in ascending order
+STILL_PENDING_PATTERN='still pending as of round [[:digit:]]+'
+
+PENDING_ROUNDS=$(echo ${RES} | grep -Eo "${STILL_PENDING_PATTERN}" | grep -Eo '[[:digit:]]+')
+
+echo "$PENDING_ROUNDS" | sort -nuC
+SORT_CHECK=$?
+
+if [[ ${SORT_CHECK} -ne 0 ]]; then
+ date '+rawsend-test pending rounds should be in ascending order %Y%m%d_%H%M%S'
+ false
+fi
+
+LAST_PENDING_ROUND=$(echo "$PENDING_ROUNDS" | tail -1)
+
+# extract the committed round; the committed round should always be greater than any of the pending rounds
+COMMITTED_PATTERN='committed in round [[:digit:]]+'
+COMMITTED_ROUND=$(echo ${RES} | grep -Eo "${COMMITTED_PATTERN}" | grep -Eo '[[:digit:]]+')
+
+if [[ ! ${COMMITTED_ROUND} -gt ${LAST_PENDING_ROUND} ]]; then
+ date '+rawsend-test pending rounds should always be smaller than committed round %Y%m%d_%H%M%S'
+ false
+fi
diff --git a/test/scripts/e2e_subs/tealprogs/stack-scratch.teal b/test/scripts/e2e_subs/tealprogs/stack-scratch.teal
new file mode 100644
index 000000000..d7d123f24
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/stack-scratch.teal
@@ -0,0 +1,45 @@
+#pragma version 8
+txn ApplicationID // on creation, always approve
+bz end
+
+txn NumAppArgs
+int 1
+==
+assert
+
+txn ApplicationArgs 0
+btoi
+callsub subroutine_manipulating_stack
+itob
+log
+b end
+
+subroutine_manipulating_stack:
+ proto 1 1
+ int 0 // [0]
+ dup // [0, 0]
+ dupn 4 // [0, 0, 0, 0, 0, 0]
+ frame_dig -1 // [0, 0, 0, 0, 0, 0, arg_0]
+ frame_bury 0 // [arg_0, 0, 0, 0, 0, 0]
+ dig 5 // [arg_0, 0, 0, 0, 0, 0, arg_0]
+ cover 5 // [arg_0, arg_0, 0, 0, 0, 0, 0]
+ frame_dig 0 // [arg_0, arg_0, 0, 0, 0, 0, 0, arg_0]
+ frame_dig 1 // [arg_0, arg_0, 0, 0, 0, 0, 0, arg_0, arg_0]
+ + // [arg_0, arg_0, 0, 0, 0, 0, 0, arg_0 * 2]
+ bury 7 // [arg_0 * 2, arg_0, 0, 0, 0, 0, 0]
+ popn 5 // [arg_0 * 2, arg_0]
+ uncover 1 // [arg_0, arg_0 * 2]
+ swap // [arg_0 * 2, arg_0]
+ + // [arg_0 * 3]
+ pushbytess "1!" "5!" // [arg_0 * 3, "1!", "5!"]
+ pushints 0 2 1 1 5 18446744073709551615 // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1, 5, 18446744073709551615]
+ store 1 // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1, 5]
+ load 1 // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1, 5, 18446744073709551615]
+ stores // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1]
+ load 1 // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1, 18446744073709551615]
+ store 1 // [arg_0 * 3, "1!", "5!", 0, 2, 1, 1]
+ retsub
+
+end:
+ int 1
+ return \ No newline at end of file
diff --git a/test/testdata/configs/config-v27.json b/test/testdata/configs/config-v27.json
index 76d25158c..aa1f20bae 100644
--- a/test/testdata/configs/config-v27.json
+++ b/test/testdata/configs/config-v27.json
@@ -41,6 +41,7 @@
"EnableCatchupFromArchiveServers": false,
"EnableDeveloperAPI": false,
"EnableExperimentalAPI": false,
+ "EnableFollowMode": false,
"EnableGossipBlockService": true,
"EnableIncomingMessageFilter": false,
"EnableLedgerService": false,
@@ -53,6 +54,7 @@
"EnableRuntimeMetrics": false,
"EnableTopAccountsReporting": false,
"EnableTxBacklogRateLimiting": false,
+ "EnableTxnEvalTracer": false,
"EnableUsageLog": false,
"EnableVerbosedTransactionSyncLogging": false,
"EndpointAddress": "127.0.0.1:0",
@@ -96,6 +98,7 @@
"RestReadTimeoutSeconds": 15,
"RestWriteTimeoutSeconds": 120,
"RunHosted": false,
+ "StorageEngine": "",
"SuggestedFeeBlockHistory": 3,
"SuggestedFeeSlidingWindowSize": 50,
"TLSCertFile": "",
@@ -106,6 +109,7 @@
"TxBacklogReservedCapacityPerPeer": 20,
"TxBacklogServiceRateWindowSeconds": 10,
"TxBacklogSize": 26000,
+ "TxIncomingFilterMaxSize": 0,
"TxIncomingFilteringFlags": 1,
"TxPoolExponentialIncreaseFactor": 2,
"TxPoolSize": 75000,
diff --git a/test/testdata/configs/config-v28.json b/test/testdata/configs/config-v28.json
new file mode 100644
index 000000000..7b6ceb532
--- /dev/null
+++ b/test/testdata/configs/config-v28.json
@@ -0,0 +1,121 @@
+{
+ "Version": 28,
+ "AccountUpdatesStatsInterval": 5000000000,
+ "AccountsRebuildSynchronousMode": 1,
+ "AgreementIncomingBundlesQueueLength": 15,
+ "AgreementIncomingProposalsQueueLength": 50,
+ "AgreementIncomingVotesQueueLength": 20000,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BlockServiceCustomFallbackEndpoints": "",
+ "BlockServiceMemCap": 500000000,
+ "BroadcastConnectionsLimit": -1,
+ "CadaverDirectory": "",
+ "CadaverSizeTarget": 0,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchpointTracking": 0,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupBlockValidateMode": 0,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net)",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DeadlockDetectionThreshold": 30,
+ "DisableLedgerLRUCache": false,
+ "DisableLocalhostConnectionRateLimit": true,
+ "DisableNetworking": false,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAccountUpdatesStats": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableBlockServiceFallbackToArchiver": true,
+ "EnableCatchupFromArchiveServers": false,
+ "EnableDeveloperAPI": false,
+ "EnableExperimentalAPI": false,
+ "EnableFollowMode": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableRuntimeMetrics": false,
+ "EnableTopAccountsReporting": false,
+ "EnableTxBacklogRateLimiting": false,
+ "EnableTxnEvalTracer": false,
+ "EnableUsageLog": false,
+ "EnableVerbosedTransactionSyncLogging": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceFetchTransactions": false,
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "HeartbeatUpdateInterval": 600,
+ "IncomingConnectionsLimit": 2400,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "LedgerSynchronousMode": 2,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxAPIBoxPerApplication": 100000,
+ "MaxAPIResourcesPerAccount": 100000,
+ "MaxAcctLookback": 4,
+ "MaxCatchpointDownloadDuration": 43200000000000,
+ "MaxConnectionsPerIP": 15,
+ "MinCatchpointFileDownloadBytesPerSecond": 20480,
+ "NetAddress": "",
+ "NetworkMessageTraceServer": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "ParticipationKeysRefreshInterval": 60000000000,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "ProposalAssemblyTime": 500000000,
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestConnectionsHardLimit": 2048,
+ "RestConnectionsSoftLimit": 1024,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "StorageEngine": "sqlite",
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TransactionSyncDataExchangeRate": 0,
+ "TransactionSyncSignificantMessageThreshold": 0,
+ "TxBacklogReservedCapacityPerPeer": 20,
+ "TxBacklogServiceRateWindowSeconds": 10,
+ "TxBacklogSize": 26000,
+ "TxIncomingFilterMaxSize": 500000,
+ "TxIncomingFilteringFlags": 1,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 75000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": "",
+ "VerifiedTranscationsCacheSize": 150000
+}
diff --git a/test/testdata/deployednettemplates/configs/reference.json b/test/testdata/deployednettemplates/configs/reference.json
index f60618fd1..f3e0149cc 100644
--- a/test/testdata/deployednettemplates/configs/reference.json
+++ b/test/testdata/deployednettemplates/configs/reference.json
@@ -9,6 +9,7 @@
"APIEndpoint3": ":8582",
"APIEndpoint4": ":8583",
"APIToken": "19b3fa5fd63074f3ec7d7052f417c423986472d253eaf275704cbc07adfe64ff",
+ "AdminAPIToken": "19b3fa5fd63074f3ec7d7052f417c423986472d253eaf275704cbc07adfe64ff",
"EnableTelemetry": true,
"TelemetryURI": "telemetry.<network>.algodev.network:9105",
"MetricsURI": "<network>.algodev.network:9106",
diff --git a/test/testdata/deployednettemplates/recipes/baseline/reference.json b/test/testdata/deployednettemplates/recipes/baseline/reference.json
index f60618fd1..f3e0149cc 100644
--- a/test/testdata/deployednettemplates/recipes/baseline/reference.json
+++ b/test/testdata/deployednettemplates/recipes/baseline/reference.json
@@ -9,6 +9,7 @@
"APIEndpoint3": ":8582",
"APIEndpoint4": ":8583",
"APIToken": "19b3fa5fd63074f3ec7d7052f417c423986472d253eaf275704cbc07adfe64ff",
+ "AdminAPIToken": "19b3fa5fd63074f3ec7d7052f417c423986472d253eaf275704cbc07adfe64ff",
"EnableTelemetry": true,
"TelemetryURI": "telemetry.<network>.algodev.network:9105",
"MetricsURI": "<network>.algodev.network:9106",
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile
index 06c946a59..f5db1ebbc 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile
@@ -1,6 +1,6 @@
# bootstrappedScenario is scenario1s but with pre-built 30_000_000 accountdb
-PARAMS=-w 20 -R 8 -N 20 -n 20 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
-FILEPARAMS=--rounds 1600 --ntxns 20000 --naccounts 30000000 --nassets 20000 --napps 20000 --wallet-name "wallet1" --bal 100000 --bal 1000000
+PARAMS=-w 20 -R 8 -N 20 -n 20 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json --last-part-key-round 50000
+FILEPARAMS=--rounds 1600 --ntxns 20000 --naccounts 30000000 --nassets 20000 --napps 20000 --wallet-name "wallet1" --bal 50000000 --bal 50000001 --deterministic
all: net.json genesis.json topology.json boostrappedFile.json
diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json
index 9d1988766..93cb62978 100644
--- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json
+++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json
@@ -6,7 +6,8 @@
"generatedApplicationCount": 20000,
"sourceWalletName": "wallet1",
"acctBalanceRange": [
- 100000,
- 1000000
- ]
+ 50000000,
+ 50000001
+ ],
+ "deterministicKeys": true
}
diff --git a/test/testdata/deployednettemplates/recipes/custom/configs/node.json b/test/testdata/deployednettemplates/recipes/custom/configs/node.json
index 4f95d0d05..547f38e19 100644
--- a/test/testdata/deployednettemplates/recipes/custom/configs/node.json
+++ b/test/testdata/deployednettemplates/recipes/custom/configs/node.json
@@ -1,6 +1,7 @@
{
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableBlockStats": false,
"EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
@@ -11,6 +12,7 @@
{
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableBlockStats": true,
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
diff --git a/test/testdata/deployednettemplates/recipes/custom/configs/relay.json b/test/testdata/deployednettemplates/recipes/custom/configs/relay.json
index 2f621f1a2..5dbd48a05 100644
--- a/test/testdata/deployednettemplates/recipes/custom/configs/relay.json
+++ b/test/testdata/deployednettemplates/recipes/custom/configs/relay.json
@@ -2,10 +2,11 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableBlockStats": true,
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }"
}
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/net.json b/test/testdata/deployednettemplates/recipes/scenario1s/net.json
index ffb3bb652..10b453281 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1s/net.json
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/net.json
@@ -10,13 +10,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -30,13 +31,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -50,13 +52,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -70,13 +73,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -90,13 +94,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -110,13 +115,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -130,13 +136,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -150,13 +157,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -198,13 +206,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -246,13 +254,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -318,13 +326,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -342,13 +350,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -366,13 +374,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -414,13 +422,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -510,13 +518,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -534,13 +542,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }"
}
]
},
diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/relay.json b/test/testdata/deployednettemplates/recipes/scenario1s/relay.json
index 563543a7b..b6a7422dd 100644
--- a/test/testdata/deployednettemplates/recipes/scenario1s/relay.json
+++ b/test/testdata/deployednettemplates/recipes/scenario1s/relay.json
@@ -2,10 +2,11 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableBlockStats": true,
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableAccountUpdatesStats\": true}"
}
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/genesis.json b/test/testdata/deployednettemplates/recipes/scenario3s/genesis.json
index df2ddd578..d3dcd590e 100644
--- a/test/testdata/deployednettemplates/recipes/scenario3s/genesis.json
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/genesis.json
@@ -2584,6 +2584,7 @@
],
"FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
"RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "RewardsPoolBalance": 125000000000000,
"DevMode": false,
"Comment": ""
}
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/net.json b/test/testdata/deployednettemplates/recipes/scenario3s/net.json
index fb4b8be19..0f8a010a4 100644
--- a/test/testdata/deployednettemplates/recipes/scenario3s/net.json
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/net.json
@@ -10,13 +10,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -30,13 +31,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -50,13 +52,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -70,13 +73,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -90,13 +94,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -110,13 +115,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -130,13 +136,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -150,13 +157,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -170,13 +178,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -190,13 +199,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -210,13 +220,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -230,13 +241,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -250,13 +262,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -270,13 +283,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -290,13 +304,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -310,13 +325,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -330,13 +346,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -350,13 +367,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -370,13 +388,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -390,13 +409,14 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -550,13 +570,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -710,13 +730,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -750,13 +770,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -790,13 +810,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1110,13 +1130,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1270,13 +1290,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1430,13 +1450,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1550,13 +1570,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1670,13 +1690,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -1790,13 +1810,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1830,13 +1850,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -1870,13 +1890,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -1910,13 +1930,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -2070,13 +2090,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -2150,13 +2170,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -2230,13 +2250,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -2310,13 +2330,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -2630,13 +2650,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -2750,13 +2770,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -2790,13 +2810,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -2830,13 +2850,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -2870,13 +2890,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -2910,13 +2930,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -2990,13 +3010,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -3030,13 +3050,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -3230,13 +3250,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -3270,13 +3290,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -3310,13 +3330,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -3550,13 +3570,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -3590,13 +3610,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
+ "EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": false,
+ "EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
}
]
},
@@ -3950,13 +3970,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -4190,13 +4210,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
}
]
},
@@ -4310,13 +4330,13 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": true,
+ "EnableTelemetry": false,
"TelemetryURI": "{{TelemetryURI}}",
- "EnableMetrics": true,
+ "EnableMetrics": false,
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}"
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }"
}
]
},
diff --git a/test/testdata/deployednettemplates/recipes/scenario3s/relay.json b/test/testdata/deployednettemplates/recipes/scenario3s/relay.json
index f0d447a81..5401cc2c5 100644
--- a/test/testdata/deployednettemplates/recipes/scenario3s/relay.json
+++ b/test/testdata/deployednettemplates/recipes/scenario3s/relay.json
@@ -2,10 +2,11 @@
"NetAddress": "{{NetworkPort}}",
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "AdminAPIToken": "{{AdminAPIToken}}",
"EnableBlockStats": true,
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }"
}
diff --git a/test/testdata/nettemplates/DevModeTxnTracerNetwork.json b/test/testdata/nettemplates/DevModeTxnTracerNetwork.json
new file mode 100644
index 000000000..1702dd280
--- /dev/null
+++ b/test/testdata/nettemplates/DevModeTxnTracerNetwork.json
@@ -0,0 +1,36 @@
+{
+ "Genesis": {
+ "NetworkName": "devmodefollowernet",
+ "LastPartKeyRound": 3000,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 40,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 40,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 20,
+ "Online": true
+ }
+ ],
+ "DevMode": true
+ },
+ "Nodes": [
+ {
+ "Name": "Node",
+ "IsRelay": false,
+ "ConfigJSONOverride": "{\"EnableTxnEvalTracer\":true}",
+ "Wallets": [
+ { "Name": "Wallet1", "ParticipationOnly": false },
+ { "Name": "Wallet2", "ParticipationOnly": false },
+ { "Name": "Wallet3", "ParticipationOnly": false }
+ ]
+ }
+ ]
+}
diff --git a/tools/block-generator/Makefile b/tools/block-generator/Makefile
new file mode 100644
index 000000000..fdb575421
--- /dev/null
+++ b/tools/block-generator/Makefile
@@ -0,0 +1,51 @@
+SCENARIO = scenarios/config.allmixed.small.yml
+SKIP = --skip-runner
+RESETDB = --reset-db
+REPORTS = ../../tmp/RUN_RUNNER_OUTPUTS
+DURATION = 30s
+VERBOSE = --verbose
+
+block-generator: clean-generator
+ go build
+
+clean-generator:
+ rm -f block-generator
+
+debug-blockgen:
+ python scripts/run_runner.py \
+ --conduit-binary ./conduit \
+ --scenario $(SCENARIO) \
+ --report-directory $(REPORTS) \
+ --keep-alive $(SKIP) \
+ --test-duration $(DURATION) \
+ $(RESETDB)
+
+enter-pg:
+ docker exec -it generator-test-container psql -U algorand -d generator_db
+
+clean-docker:
+ docker rm -f generator-test-container
+
+run-runner: block-generator
+ ./block-generator runner --conduit-binary ./conduit \
+ --keep-data-dir \
+ --test-duration $(DURATION) \
+ --conduit-log-level trace \
+ --postgres-connection-string "host=localhost user=algorand password=algorand dbname=generator_db port=15432 sslmode=disable" \
+ --scenario $(SCENARIO) \
+ $(RESETDB) \
+ $(VERBOSE) \
+ --report-directory $(REPORTS)
+
+clean-reports:
+ rm -rf $(REPORTS)
+
+pre-git-push:
+ mv _go.mod go.mod
+ mv _go.sum go.sum
+ cd ../../ && make tidy
+
+post-git-push:
+ mv go.mod _go.mod
+ mv go.sum _go.sum
+ cd ../../ && make tidy && go get github.com/lib/pq
diff --git a/tools/block-generator/README.md b/tools/block-generator/README.md
index 93e62d546..a26328ec4 100644
--- a/tools/block-generator/README.md
+++ b/tools/block-generator/README.md
@@ -2,6 +2,27 @@
This tool is used for testing Conduit import performance. It does this by generating synthetic blocks which are sent by mocking the Algod REST API endpoints that Conduit uses.
+## Benchmark Scenarios
+
+Several scenarios were designed to mimic different block traffic patterns. Scenarios can be used to test the same traffic across multiple versions of software. Each benchmark is run twice. Once with blocks containing 25000 transactions, and once with blocks containing 50000 transactions.
+
+### Organic Traffic
+
+Simulate the current mainnet traffic pattern. Approximately:
+* 15% payment transactions
+* 10% application transactions
+* 75% asset transactions
+
+With current tooling, the app transactions use boxes much more frequently than mainnet traffic does.
+
+### Payment Test (best case TPS)
+
+Blocks are entirely made up of payments. Most payments are transfers between existing accounts.
+
+### Stress Test (worst case TPS)
+
+Blocks are heavily weighted towards creating applications and boxes. This means a lot of data is being written which should translate to lower TPS.
+
## Scenario Configuration
Block generator uses a YAML config file to describe the composition of each randomly generated block. There are three levels of configuration:
@@ -10,19 +31,20 @@ Block generator uses a YAML config file to describe the composition of each rand
2. Transaction type distribution
3. Transaction type specific configuration
-At the time of writing, the block generator supports **payment** and **asset** transactions. The settings are hopefully, more or less, obvious. Distributions are specified as fractions of 1.0, and the sum of all options must add up to 1.0.
+The block generator supports **payment**, **asset**, and **application** transactions. The settings are hopefully, more or less, obvious. Distributions are specified as fractions of 1.0, and the sum of all options must add up to ~1.0.
-Here is an example which uses all of the current options. Notice that the synthetic blocks are not required to follow algod limits, in this case the block size is specified as 19999:
+Here is an example which uses all of the current options. Notice that the synthetic blocks are not required to follow algod limits, in this case the block size is specified as 99,999:
```yml
-name: "Mixed (19,999)"
+name: "Mixed (99,999)"
genesis_accounts: 10000
genesis_account_balance: 1000000000000
-tx_per_block: 19999
+tx_per_block: 99999
# transaction distribution
-tx_pay_fraction: 0.3
-tx_asset_fraction: 0.7
+tx_pay_fraction: 0.5
+tx_asset_fraction: 0.3
+tx_app_fraction: 0.2
# payment config
pay_acct_create_fraction: 0.02
@@ -34,6 +56,28 @@ asset_optin_fraction: 0.1
asset_close_fraction: 0.05
asset_xfer_fraction: 0.849
asset_delete_fraction: 0
+
+# app choice config
+app_swap_fraction: 0.5
+app_boxes_fraction: 0.5
+
+# app_swap config
+app_swap_create_fraction: 0.001
+app_swap_update_fraction: 0.001
+app_swap_delete_fraction: 0
+app_swap_optin_fraction: 0.1
+app_swap_call_fraction: 0.98
+app_swap_close_fraction: 0.005
+app_swap_clear_fraction: 0.003
+
+# app_boxes config
+app_boxes_create_fraction: 0.001
+app_boxes_update_fraction: 0.001
+app_boxes_delete_fraction: 0
+app_boxes_optin_fraction: 0.1
+app_boxes_call_fraction: 0.98
+app_boxes_close_fraction: 0.005
+app_boxes_clear_fraction: 0.003
```
## Modes
@@ -61,7 +105,7 @@ Flags:
-h, --help help for daemon
-p, --port uint Port to start the server at. (default 4010)
```
-
+
### runner
The runner mode is well suited for running the same set of tests consistently across many scenarios and for different releases. The runner mode automates this process by starting the **daemon** with many different configurations, managing a postgres database, and running a separate Conduit process configured to use them.
@@ -77,7 +121,7 @@ transaction_pay_total:30024226
transaction_pay_create_total:614242
early_average_import_time_sec:2.13
early_cumulative_import_time_sec:1083.26
-early_average_imported_tx_per_block:19999.00
+early_average_imported_tx_per_block:99999.00
early_cumulative_imported_tx_per_block:10179491
early_average_block_upload_time_sec:NaN
early_cumulative_block_upload_time_sec:0.00
@@ -88,7 +132,7 @@ early_overall_transactions_per_second:9397.09
early_uptime_seconds:3600.06
final_average_import_time_sec:2.35
final_cumulative_import_time_sec:3602.62
-final_average_imported_tx_per_block:19999.00
+final_average_imported_tx_per_block:99999.00
final_cumulative_imported_tx_per_block:30598470
final_average_block_upload_time_sec:NaN
final_cumulative_block_upload_time_sec:0.00
@@ -110,20 +154,23 @@ Usage:
Flags:
-i, --conduit-binary string Path to conduit binary.
- --cpuprofile string Path where conduit writes its CPU profile.
+ -l, --conduit-log-level string LogLevel to use when starting Conduit. [panic, fatal, error, warn, info, debug, trace] (default "error")
+ --cpuprofile string Path where Conduit writes its CPU profile.
+ -f, --genesis-file string file path to the genesis associated with the db snapshot
-h, --help help for runner
-k, --keep-data-dir If set the validator will not delete the data directory after tests complete.
- -l, --log-level string LogLevel to use when starting conduit. [panic, fatal, error, warn, info, debug, trace] (default "error")
-p, --metrics-port uint Port to start the metrics server at. (default 9999)
-c, --postgres-connection-string string Postgres connection string.
-r, --report-directory string Location to place test reports.
- --reset If set any existing report directory will be deleted before running tests.
+ --reset-db If set database will be deleted before running tests.
+ --reset-report-dir If set any existing report directory will be deleted before running tests.
-s, --scenario string Directory containing scenarios, or specific scenario file.
-d, --test-duration duration Duration to use for each scenario. (default 5m0s)
--validate If set the validator will run after test-duration has elapsed to verify data is correct. An extra line in each report indicates validator success or failure.
-```
+ -v, --verbose If set the runner will print debugging information from the generator and ledger.
+ ```
-## Example Run using Conduit and Postgres in **bash** via `run_runner.sh`
+## Example Run using Conduit and Postgres
A typical **runner** scenario involves:
@@ -132,30 +179,30 @@ A typical **runner** scenario involves:
* a datastore -such as a postgres database- to collect `conduit`'s output
* a `conduit` config file to define its import/export behavior
-`run_runner.sh` makes the following choices for the previous bullet points:
-
-* it can accept any scenario as its second argument, but defaults to [test_config.yml](./test_config.yml) when this isn't provided (this is a scenario with a lifetime of ~30 seconds)
-* knows how to import through a mock Algod running on port 11112 (which is the port the runner avails)
-* sets up a dockerized postgres database to receive conduit's output
-* configures `conduit` for these specs using [this config template](./runner/template/conduit.yml.tmpl)
+The `block-generator runner` subcommand has a number of options to configure behavior.
### Sample Run
First you'll need to get a `conduit` binary. For example you can follow the [developer portal's instructions](https://developer.algorand.org/docs/get-details/conduit/GettingStarted/#installation) or run `go build .` inside of the directory `cmd/conduit` after downloading the `conduit` repo.
-Assume you've navigated to the `tools/block-generator` directory of
-the `go-algorand` repo, and:
+Run `make install` from the `go-algorand` root, this should add `block-generator` to your path.
-* saved the conduit binary to `tools/block-generator/conduit`
-* created a block generator scenario config at `tools/block-generator/scenario.yml`
+Start a postgres container using `scripts/run_postgres.sh`. This starts a container on port 15432, with a database named generator_db and a user with credentials algorand/algorand.
-Then you can execute the following command to run the scenario:
+Now run `block-generator runner` to run the test:
```sh
-./run_runner.sh ./conduit scenario.yml
+block-generator runner \
+ --conduit-binary "$CONDUIT_BINARY" \
+ --report-directory reports \
+ --test-duration 30s \
+ --conduit-log-level trace \
+ --postgres-connection-string "host=localhost user=algorand password=algorand dbname=generator_db port=15432 sslmode=disable" \
+ --scenario generator/test_scenario.yml \
+ --reset-db
```
### Scenario Report
-If all goes well, the run will generate a directory `tmp/OUTPUT_RUN_RUNNER_TEST`
-and in that directory you can see the statisticsn of the run in `scenario.report`.
+If all goes well, the run will generate a directory named reports.
+In that directory you can see the statistics of the run in the file ending with `.report`.
diff --git a/tools/block-generator/generator/config.go b/tools/block-generator/generator/config.go
new file mode 100644
index 000000000..912696f11
--- /dev/null
+++ b/tools/block-generator/generator/config.go
@@ -0,0 +1,362 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "gopkg.in/yaml.v3"
+)
+
+// ---- types ----
+
+// TxTypeID is the transaction type.
+type TxTypeID string
+
+const (
+ genesis TxTypeID = "genesis"
+
+ // TX Distribution / ID's
+ paymentTx TxTypeID = "pay"
+ assetTx TxTypeID = "asset"
+ applicationTx TxTypeID = "appl"
+ //keyRegistrationTx TxTypeID = "keyreg"
+
+ // Payment TX Distribution / ID's
+ paymentAcctCreateTx TxTypeID = "pay_create"
+ paymentPayTx TxTypeID = "pay_pay"
+
+ // Asset TX Distribution / ID's
+ assetCreate TxTypeID = "asset_create"
+ assetDestroy TxTypeID = "asset_destroy"
+ assetOptin TxTypeID = "asset_optin"
+ assetXfer TxTypeID = "asset_xfer"
+ assetClose TxTypeID = "asset_close"
+
+ // App kind TX Distribution / ID's don't exist because these are flattened
+ // into weights across (app kinds) X (app tx type)
+
+ // App Swap TX Distribution / ID's
+ appSwapCreate TxTypeID = "app_swap_create"
+ appSwapUpdate TxTypeID = "app_swap_update"
+ appSwapDelete TxTypeID = "app_swap_delete"
+ appSwapOptin TxTypeID = "app_swap_optin"
+ appSwapCall TxTypeID = "app_swap_call"
+ appSwapClose TxTypeID = "app_swap_close"
+ appSwapClear TxTypeID = "app_swap_clear"
+
+ // App Boxes TX Distribution / ID's
+ appBoxesCreate TxTypeID = "app_boxes_create"
+ appBoxesUpdate TxTypeID = "app_boxes_update"
+ appBoxesDelete TxTypeID = "app_boxes_delete"
+ appBoxesOptin TxTypeID = "app_boxes_optin"
+ appBoxesCall TxTypeID = "app_boxes_call"
+ appBoxesClose TxTypeID = "app_boxes_close"
+ appBoxesClear TxTypeID = "app_boxes_clear"
+
+ // For reporting side-effects of higher level transactions
+ effectPaymentTxSibling = "effect_payment_sibling"
+ effectInnerTx = "effect_inner_tx"
+
+ // Defaults
+ defaultGenesisAccountsCount uint64 = 1000
+ defaultGenesisAccountInitialBalance uint64 = 1_000_000_000000 // 1 million algos per account
+
+ assetTotal uint64 = 100_000_000_000_000_000 // 100 billion units per asset
+
+ consensusTimeMilli int64 = 3300
+)
+
+type appKind uint8
+
+const (
+ appKindSwap appKind = iota
+ appKindBoxes
+)
+
+func (a appKind) String() string {
+ switch a {
+ case appKindSwap:
+ return "swap"
+ case appKindBoxes:
+ return "boxes"
+ default:
+ // Return a default value for unknown kinds.
+ return "Unknown"
+ }
+}
+
+type appTxType uint8
+
+const (
+ appTxTypeCreate appTxType = iota
+ appTxTypeUpdate
+ appTxTypeDelete
+ appTxTypeOptin
+ appTxTypeCall
+ appTxTypeClose
+ appTxTypeClear
+)
+
+func (a appTxType) String() string {
+ switch a {
+ case appTxTypeCreate:
+ return "create"
+ case appTxTypeUpdate:
+ return "update"
+ case appTxTypeDelete:
+ return "delete"
+ case appTxTypeOptin:
+ return "optin"
+ case appTxTypeCall:
+ return "call"
+ case appTxTypeClose:
+ return "close"
+ case appTxTypeClear:
+ return "clear"
+ default:
+ // Return a default value for unknown types.
+ return "Unknown"
+ }
+}
+
+func parseAppTxType(txType TxTypeID) (isApp bool, kind appKind, tx appTxType, err error) {
+ parts := strings.Split(string(txType), "_")
+
+ if len(parts) != 3 {
+ err = fmt.Errorf("invalid tx type for parsing")
+ return
+ }
+
+ if len(parts) > 1 && strings.HasPrefix(parts[0], "app") {
+ isApp = true
+ // Setting the app kind
+ switch parts[1] {
+ case "swap":
+ kind = appKindSwap
+ case "boxes":
+ kind = appKindBoxes
+ default:
+ err = fmt.Errorf("invalid app kind")
+ return
+ }
+
+ switch parts[2] {
+ case "create":
+ tx = appTxTypeCreate
+ case "update":
+ tx = appTxTypeUpdate
+ case "delete":
+ tx = appTxTypeDelete
+ case "optin":
+ tx = appTxTypeOptin
+ case "call":
+ tx = appTxTypeCall
+ case "close":
+ tx = appTxTypeClose
+ case "clear":
+ tx = appTxTypeClear
+ default:
+ err = fmt.Errorf("invalid app tx type")
+ return
+ }
+ } else {
+ err = fmt.Errorf("not an app type")
+ return
+ }
+
+ return
+}
+
+func getAppTxType(kind appKind, appType appTxType) TxTypeID {
+ return TxTypeID(fmt.Sprintf("app_%s_%s", kind, appType))
+}
+
+// GenerationConfig defines the tunable parameters for block generation.
+type GenerationConfig struct {
+ Name string `yaml:"name"`
+ NumGenesisAccounts uint64 `yaml:"genesis_accounts"`
+ GenesisAccountInitialBalance uint64 `yaml:"genesis_account_balance"`
+
+ // Block generation
+ TxnPerBlock uint64 `yaml:"tx_per_block"`
+
+ // TX Distribution
+ PaymentTransactionFraction float32 `yaml:"tx_pay_fraction"`
+ AssetTransactionFraction float32 `yaml:"tx_asset_fraction"`
+ AppTransactionFraction float32 `yaml:"tx_app_fraction"`
+
+ // Payment TX Distribution
+ PaymentNewAccountFraction float32 `yaml:"pay_acct_create_fraction"`
+ PaymentFraction float32 `yaml:"pay_xfer_fraction"`
+
+ // Asset TX Distribution
+ AssetCreateFraction float32 `yaml:"asset_create_fraction"`
+ AssetDestroyFraction float32 `yaml:"asset_destroy_fraction"`
+ AssetOptinFraction float32 `yaml:"asset_optin_fraction"`
+ AssetXferFraction float32 `yaml:"asset_xfer_fraction"`
+ AssetCloseFraction float32 `yaml:"asset_close_fraction"`
+
+ // App kind TX Distribution
+ AppSwapFraction float32 `yaml:"app_swap_fraction"`
+ AppBoxesFraction float32 `yaml:"app_boxes_fraction"`
+
+ // App Swap TX Distribution
+ AppSwapCreateFraction float32 `yaml:"app_swap_create_fraction"`
+ AppSwapUpdateFraction float32 `yaml:"app_swap_update_fraction"`
+ AppSwapDeleteFraction float32 `yaml:"app_swap_delete_fraction"`
+ AppSwapOptinFraction float32 `yaml:"app_swap_optin_fraction"`
+ AppSwapCallFraction float32 `yaml:"app_swap_call_fraction"`
+ AppSwapCloseFraction float32 `yaml:"app_swap_close_fraction"`
+ AppSwapClearFraction float32 `yaml:"app_swap_clear_fraction"`
+
+ // App Boxes TX Distribution
+ AppBoxesCreateFraction float32 `yaml:"app_boxes_create_fraction"`
+ AppBoxesUpdateFraction float32 `yaml:"app_boxes_update_fraction"`
+ AppBoxesDeleteFraction float32 `yaml:"app_boxes_delete_fraction"`
+ AppBoxesOptinFraction float32 `yaml:"app_boxes_optin_fraction"`
+ AppBoxesCallFraction float32 `yaml:"app_boxes_call_fraction"`
+ AppBoxesCloseFraction float32 `yaml:"app_boxes_close_fraction"`
+ AppBoxesClearFraction float32 `yaml:"app_boxes_clear_fraction"`
+}
+
+// ---- construction and validation ----
+
+// initializeConfigFile reads the config file and validates its parameters. Certain
+// parameters are defaulted to a reasonable value when they are missing, or when an
+// entire associated group is missing.
+func initializeConfigFile(configFile string) (config GenerationConfig, err error) {
+ var data []byte
+ data, err = os.ReadFile(configFile)
+ if err != nil {
+ return
+ }
+ err = yaml.Unmarshal(data, &config)
+ if err != nil {
+ return
+ }
+
+ err = config.validateWithDefaults(true)
+ return
+}
+
+// validateWithDefaults validates the config parameters. When defaults is true
+// certain missing parameters are defaulted to reasonable values.
+// When defaults is false, validate only without attempting to set defaults.
+func (cfg *GenerationConfig) validateWithDefaults(defaults bool) error {
+ if cfg.Name == "" {
+ return fmt.Errorf("scenario name must be set")
+ }
+
+ if cfg.NumGenesisAccounts == 0 {
+ if defaults {
+ cfg.NumGenesisAccounts = defaultGenesisAccountsCount
+ } else {
+ return fmt.Errorf("number of genesis accounts must be > 0")
+ }
+ }
+
+ if cfg.GenesisAccountInitialBalance == 0 {
+ if defaults {
+ cfg.GenesisAccountInitialBalance = defaultGenesisAccountInitialBalance
+ } else {
+ return fmt.Errorf("genesis account initial balance must be > 0")
+ }
+ }
+
+ var weights []*float32
+
+ weights = []*float32{&cfg.PaymentTransactionFraction, &cfg.AssetTransactionFraction, &cfg.AppTransactionFraction}
+ if eTxnTypes := sumIsCloseToOneWithDefault(defaults, weights...); eTxnTypes != nil {
+ return fmt.Errorf("transaction distribution ratios sum should equal 1: %w", eTxnTypes)
+ }
+
+ weights = []*float32{&cfg.PaymentNewAccountFraction, &cfg.PaymentFraction}
+ if ePymtTypes := sumIsCloseToOneWithDefault(defaults, weights...); ePymtTypes != nil {
+ return fmt.Errorf("payment configuration ratios sum should equal 1: %w", ePymtTypes)
+ }
+
+ weights = []*float32{&cfg.AssetCreateFraction, &cfg.AssetDestroyFraction, &cfg.AssetOptinFraction, &cfg.AssetCloseFraction, &cfg.AssetXferFraction}
+ if eAssetTypes := sumIsCloseToOneWithDefault(defaults, weights...); eAssetTypes != nil {
+ return fmt.Errorf("asset configuration ratios sum should equal 1: %w", eAssetTypes)
+ }
+
+ weights = []*float32{&cfg.AppSwapFraction, &cfg.AppBoxesFraction}
+ if eAppTypes := sumIsCloseToOneWithDefault(defaults, weights...); eAppTypes != nil {
+ return fmt.Errorf("app configuration ratios sum should equal 1: %w", eAppTypes)
+ }
+
+ weights = []*float32{&cfg.AppSwapCreateFraction, &cfg.AppSwapUpdateFraction, &cfg.AppSwapDeleteFraction, &cfg.AppSwapOptinFraction, &cfg.AppSwapCallFraction, &cfg.AppSwapCloseFraction, &cfg.AppSwapClearFraction}
+ if eAppSwapTypes := sumIsCloseToOneWithDefault(defaults, weights...); eAppSwapTypes != nil {
+ return fmt.Errorf("app swap configuration ratios sum should equal 1: %w", eAppSwapTypes)
+ }
+
+ weights = []*float32{&cfg.AppBoxesCreateFraction, &cfg.AppBoxesUpdateFraction, &cfg.AppBoxesDeleteFraction, &cfg.AppBoxesOptinFraction, &cfg.AppBoxesCallFraction, &cfg.AppBoxesCloseFraction, &cfg.AppBoxesClearFraction}
+ if eAppBoxesTypes := sumIsCloseToOneWithDefault(defaults, weights...); eAppBoxesTypes != nil {
+ return fmt.Errorf("app boxes configuration ratios sum should equal 1: %w", eAppBoxesTypes)
+
+ }
+
+ return nil
+}
+
+func asPtrSlice(weights []float32) []*float32 {
+ ptrs := make([]*float32, len(weights))
+ for i := range weights {
+ weight := weights[i]
+ ptrs[i] = &weight
+ }
+ return ptrs
+}
+
+// sumIsCloseToOneWithDefault returns no error if the sum of the params is close to 1.
+// It returns an error if any of the params are negative.
+// Finally, when all the params are zero and defaults is enabled, it sets the first param to 1 and returns no error.
+func sumIsCloseToOneWithDefault(defaults bool, params ...*float32) error {
+ if len(params) == 0 {
+ return fmt.Errorf("no params provided")
+ }
+
+ sum, valid, err := validateSumCloseToOne(params)
+ if valid || err != nil {
+ return err
+ }
+
+ if sum == 0 && defaults {
+ *params[0] = 1
+ return nil
+ }
+
+ return fmt.Errorf("sum of params is not close to 1: %f", sum)
+}
+
+// validateSumCloseToOne returns the sum of the params, whether the sum is close to 1, and any error encountered.
+// In the case that err is not nil, the value of valid is undefined.
+func validateSumCloseToOne(params []*float32) (sum float32, valid bool, err error) {
+ for i, num := range params {
+ if *num < 0 {
+ return *num, false, fmt.Errorf("param at index %d is negative: %f", i, *num)
+ }
+ sum += *num
+ }
+ if 0.99 < sum && sum < 1.01 {
+ return sum, true, nil
+ }
+ return sum, false, nil
+}
diff --git a/tools/block-generator/generator/config_test.go b/tools/block-generator/generator/config_test.go
new file mode 100644
index 000000000..a595ad6d6
--- /dev/null
+++ b/tools/block-generator/generator/config_test.go
@@ -0,0 +1,244 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestInitConfigFile(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ config, err := initializeConfigFile("test_scenario.yml")
+ require.NoError(t, err)
+ require.Equal(t, uint64(10), config.NumGenesisAccounts)
+ require.Equal(t, float32(0.25), config.AssetCloseFraction)
+ require.Equal(t, float32(0.0), config.AssetDestroyFraction)
+}
+
+func TestInitConfigFileNotExist(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ _, err := initializeConfigFile("this_is_not_a_config_file")
+
+ if _, ok := err.(*os.PathError); !ok {
+ require.Fail(t, "This should generate a path error")
+ }
+}
+
+func TestValidateWithDefaults(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ empty := func(fs ...float32) bool {
+ for _, f := range fs {
+ if f != 0 {
+ return false
+ }
+ }
+ return true
+ }
+
+ sum := func(fs ...float32) float32 {
+ s := float32(0)
+ for _, f := range fs {
+ s += f
+ }
+ return s
+ }
+
+ one := float32(1)
+
+ testCases := []struct {
+ name string
+ genCfg GenerationConfig
+ err error
+ }{
+ {
+ name: "all fields valid",
+ genCfg: GenerationConfig{
+ Name: "Test",
+ NumGenesisAccounts: 1,
+ GenesisAccountInitialBalance: 1,
+ TxnPerBlock: 1,
+ },
+ err: nil,
+ },
+ {
+ name: "just a name",
+ genCfg: GenerationConfig{Name: "Test"},
+ err: nil,
+ },
+ {
+ name: "no name",
+ genCfg: GenerationConfig{
+ NumGenesisAccounts: 1,
+ GenesisAccountInitialBalance: 1,
+ TxnPerBlock: 1,
+ },
+ err: fmt.Errorf("scenario name must be set"),
+ },
+ {
+ name: "no genesis accounts",
+ genCfg: GenerationConfig{
+ Name: "Test",
+ GenesisAccountInitialBalance: 1,
+ TxnPerBlock: 1,
+ },
+ err: nil,
+ },
+ {
+ name: "no genesis account balance",
+ genCfg: GenerationConfig{
+ Name: "Test",
+ NumGenesisAccounts: 1,
+ },
+ },
+ {
+ name: "negative",
+ genCfg: GenerationConfig{
+ Name: "Test",
+ NumGenesisAccounts: 1,
+ PaymentTransactionFraction: -0.1,
+ },
+ err: fmt.Errorf("transaction distribution ratios sum should equal 1: param at index 0 is negative: -0.100000"),
+ },
+ {
+ name: "doesn't sum to 1",
+ genCfg: GenerationConfig{
+ Name: "Test",
+ NumGenesisAccounts: 1,
+ AppBoxesCreateFraction: 0.5,
+ AppBoxesUpdateFraction: 0.5,
+ AppBoxesCallFraction: 0.5,
+ },
+ err: fmt.Errorf("app boxes configuration ratios sum should equal 1: sum of params is not close to 1: 1.500000"),
+ },
+ {
+ name: "1-defaults",
+ genCfg: GenerationConfig{
+ Name: "Test",
+ NumGenesisAccounts: 1,
+ GenesisAccountInitialBalance: 42,
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ cfg := tc.genCfg
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ emptyGenesisAccounts := cfg.NumGenesisAccounts == 0
+ emptyGenesisAccountInitialBalance := cfg.GenesisAccountInitialBalance == 0
+ emptyTxnDistributions := empty(cfg.PaymentTransactionFraction, cfg.AssetTransactionFraction, cfg.AppTransactionFraction)
+ emptyPymtFractions := empty(cfg.PaymentNewAccountFraction, cfg.PaymentFraction)
+ emptyAssetFractions := empty(cfg.AssetCreateFraction, cfg.AssetDestroyFraction, cfg.AssetOptinFraction, cfg.AssetCloseFraction, cfg.AssetXferFraction)
+ emptyAppFractions := empty(cfg.AppSwapFraction, cfg.AppBoxesFraction)
+ emptySwapFraction := empty(cfg.AppSwapCreateFraction, cfg.AppSwapUpdateFraction, cfg.AppSwapDeleteFraction, cfg.AppSwapOptinFraction, cfg.AppSwapCallFraction, cfg.AppSwapCloseFraction, cfg.AppSwapClearFraction)
+ emptyBoxesFraction := empty(cfg.AppBoxesCreateFraction, cfg.AppBoxesUpdateFraction, cfg.AppBoxesDeleteFraction, cfg.AppBoxesOptinFraction, cfg.AppBoxesCallFraction, cfg.AppBoxesCloseFraction, cfg.AppBoxesClearFraction)
+
+ err := cfg.validateWithDefaults(true)
+
+ if tc.err == nil {
+ require.Nil(t, err)
+ require.Nil(t, cfg.validateWithDefaults(false))
+
+ if emptyGenesisAccounts {
+ require.Equal(t, defaultGenesisAccountsCount, cfg.NumGenesisAccounts)
+ }
+
+ if emptyGenesisAccountInitialBalance {
+ require.Equal(t, defaultGenesisAccountInitialBalance, cfg.GenesisAccountInitialBalance)
+ }
+
+ if emptyTxnDistributions {
+ require.Equal(t, one, cfg.PaymentTransactionFraction)
+ }
+
+ if emptyPymtFractions {
+ require.Equal(t, one, cfg.PaymentNewAccountFraction)
+ }
+
+ if emptyAssetFractions {
+ require.Equal(t, one, cfg.AssetCreateFraction)
+ }
+
+ if emptyAppFractions {
+ require.Equal(t, one, cfg.AppSwapFraction)
+ }
+
+ if emptySwapFraction {
+ require.Equal(t, one, cfg.AppSwapCreateFraction)
+ }
+
+ if emptyBoxesFraction {
+ require.Equal(t, one, cfg.AppBoxesCreateFraction)
+ }
+
+ require.Equal(t, one, sum(cfg.PaymentTransactionFraction, cfg.AssetTransactionFraction, cfg.AppTransactionFraction))
+ require.Equal(t, one, sum(cfg.PaymentNewAccountFraction, cfg.PaymentFraction))
+ require.Equal(t, one, sum(cfg.AssetCreateFraction, cfg.AssetDestroyFraction, cfg.AssetOptinFraction, cfg.AssetCloseFraction, cfg.AssetXferFraction))
+ require.Equal(t, one, sum(cfg.AppSwapFraction, cfg.AppBoxesFraction))
+ require.Equal(t, one, sum(cfg.AppSwapCreateFraction, cfg.AppSwapUpdateFraction, cfg.AppSwapDeleteFraction, cfg.AppSwapOptinFraction, cfg.AppSwapCallFraction, cfg.AppSwapCloseFraction, cfg.AppSwapClearFraction))
+ require.Equal(t, one, sum(cfg.AppBoxesCreateFraction, cfg.AppBoxesUpdateFraction, cfg.AppBoxesDeleteFraction, cfg.AppBoxesOptinFraction, cfg.AppBoxesCallFraction, cfg.AppBoxesCloseFraction, cfg.AppBoxesClearFraction))
+ } else {
+ require.Equal(t, tc.err.Error(), err.Error())
+ }
+
+ })
+ }
+}
+
+func TestTxTypeParse(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ tests := []struct {
+ name string
+ txType TxTypeID
+ IsApp bool
+ Kind appKind
+ TxType appTxType
+ err string
+ }{
+ {"App Swap Create", "app_swap_create", true, appKindSwap, appTxTypeCreate, ""},
+ {"App Boxes Delete", "app_boxes_delete", true, appKindBoxes, appTxTypeDelete, ""},
+ {"not enough _'s", "app_swap", false, 0, 0, "invalid app tx type for parsing"},
+ {"too many _'s", "app_swap_delete_very_much", false, 0, 0, "invalid app tx type for parsing"},
+ {"Invalid App Kind", "app_invalid_delete", false, 0, 0, "invalid app kind"},
+ {"Invalid Tx Type", "app_boxes_invalid", false, 0, 0, "invalid app tx type"},
+ {"Not An App", "not_an_app", false, 0, 0, "not an app type"},
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+ isApp, kind, txType, err := parseAppTxType(test.txType)
+
+ if test.err != "" {
+ require.Error(t, err, test.err)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, test.IsApp, isApp, "Mismatch in isApp for %s", test.txType)
+ require.Equal(t, test.Kind, kind, "Mismatch in kind for %s", test.txType)
+ require.Equal(t, test.TxType, txType, "Mismatch in txType for %s", test.txType)
+ }
+ })
+ }
+}
diff --git a/tools/block-generator/generator/generate.go b/tools/block-generator/generator/generate.go
index 48df2b06c..1a88ea0fe 100644
--- a/tools/block-generator/generator/generate.go
+++ b/tools/block-generator/generator/generate.go
@@ -17,7 +17,9 @@
package generator
import (
+ _ "embed"
"encoding/json"
+ "errors"
"fmt"
"io"
"math/rand"
@@ -25,96 +27,42 @@ import (
"time"
cconfig "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/ledger/ledgercore"
- "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/rpcs"
- "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/committee"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/rpcs"
+ txn "github.com/algorand/go-algorand/data/transactions"
)
-// TxTypeID is the transaction type.
-type TxTypeID string
-
-const (
- genesis TxTypeID = "genesis"
+// ---- templates ----
- // Payment Tx IDs
- paymentTx TxTypeID = "pay"
- paymentAcctCreateTx TxTypeID = "pay_create"
- assetTx TxTypeID = "asset"
- //keyRegistrationTx TxTypeID = "keyreg"
- //applicationCallTx TxTypeID = "appl"
+//go:embed teal/poap_boxes.teal
+var approvalBoxes string
- // Asset Tx IDs
- assetCreate TxTypeID = "asset_create"
- assetOptin TxTypeID = "asset_optin"
- assetXfer TxTypeID = "asset_xfer"
- assetClose TxTypeID = "asset_close"
- assetDestroy TxTypeID = "asset_destroy"
+//go:embed teal/poap_clear.teal
+var clearBoxes string
- assetTotal = uint64(100000000000000000)
+//go:embed teal/swap_amm.teal
+var approvalSwap string
- consensusTimeMilli int64 = 4500
- startingTxnCounter uint64 = 1000
-)
+//go:embed teal/swap_clear.teal
+var clearSwap string
-// GenerationConfig defines the tunable parameters for block generation.
-type GenerationConfig struct {
- Name string `yaml:"name"`
- NumGenesisAccounts uint64 `yaml:"genesis_accounts"`
- GenesisAccountInitialBalance uint64 `yaml:"genesis_account_balance"`
-
- // Block generation
- TxnPerBlock uint64 `yaml:"tx_per_block"`
-
- // TX Distribution
- PaymentTransactionFraction float32 `yaml:"tx_pay_fraction"`
- AssetTransactionFraction float32 `yaml:"tx_asset_fraction"`
-
- // Payment configuration
- PaymentNewAccountFraction float32 `yaml:"pay_acct_create_fraction"`
- PaymentFraction float32 `yaml:"pay_xfer_fraction"`
-
- // Asset configuration
- AssetCreateFraction float32 `yaml:"asset_create_fraction"`
- AssetDestroyFraction float32 `yaml:"asset_destroy_fraction"`
- AssetOptinFraction float32 `yaml:"asset_optin_fraction"`
- AssetCloseFraction float32 `yaml:"asset_close_fraction"`
- AssetXferFraction float32 `yaml:"asset_xfer_fraction"`
-}
-
-func sumIsCloseToOne(numbers ...float32) bool {
- var sum float32
- for _, num := range numbers {
- sum += num
- }
- return sum > 0.99 && sum < 1.01
-}
+// ---- constructors ----
// MakeGenerator initializes the Generator object.
-func MakeGenerator(dbround uint64, bkGenesis bookkeeping.Genesis, config GenerationConfig) (Generator, error) {
- if !sumIsCloseToOne(config.PaymentTransactionFraction, config.AssetTransactionFraction) {
- return nil, fmt.Errorf("transaction distribution ratios should equal 1")
- }
-
- if !sumIsCloseToOne(config.PaymentNewAccountFraction, config.PaymentFraction) {
- return nil, fmt.Errorf("payment configuration ratios should equal 1")
- }
-
- if !sumIsCloseToOne(config.AssetCreateFraction, config.AssetDestroyFraction, config.AssetOptinFraction, config.AssetCloseFraction, config.AssetXferFraction) {
- return nil, fmt.Errorf("asset configuration ratios should equal 1")
+func MakeGenerator(dbround uint64, bkGenesis bookkeeping.Genesis, config GenerationConfig, verbose bool) (Generator, error) {
+ if err := config.validateWithDefaults(false); err != nil {
+ return nil, fmt.Errorf("invalid generator configuration: %w", err)
}
var proto protocol.ConsensusVersion = "future"
gen := &generator{
+ verbose: verbose,
config: config,
protocol: proto,
params: cconfig.Consensus[proto],
@@ -123,13 +71,13 @@ func MakeGenerator(dbround uint64, bkGenesis bookkeeping.Genesis, config Generat
genesisID: "blockgen-test",
prevBlockHash: "",
round: 0,
- txnCounter: startingTxnCounter,
timestamp: 0,
rewardsLevel: 0,
rewardsResidue: 0,
rewardsRate: 0,
rewardsRecalculationRound: 0,
reportData: make(map[TxTypeID]TxData),
+ latestData: make(map[TxTypeID]uint64),
roundOffset: dbround,
}
@@ -143,6 +91,20 @@ func MakeGenerator(dbround uint64, bkGenesis bookkeeping.Genesis, config Generat
gen.genesisHash = bkGenesis.Hash()
}
+ gen.resetPendingApps()
+ gen.appSlice = map[appKind][]*appData{
+ appKindBoxes: make([]*appData, 0),
+ appKindSwap: make([]*appData, 0),
+ }
+ gen.appMap = map[appKind]map[uint64]*appData{
+ appKindBoxes: make(map[uint64]*appData),
+ appKindSwap: make(map[uint64]*appData),
+ }
+ gen.accountAppOptins = map[appKind]map[uint64][]uint64{
+ appKindBoxes: make(map[uint64][]uint64),
+ appKindSwap: make(map[uint64][]uint64),
+ }
+
gen.initializeAccounting()
gen.initializeLedger()
for _, val := range getTransactionOptions() {
@@ -151,17 +113,26 @@ func MakeGenerator(dbround uint64, bkGenesis bookkeeping.Genesis, config Generat
gen.transactionWeights = append(gen.transactionWeights, config.PaymentTransactionFraction)
case assetTx:
gen.transactionWeights = append(gen.transactionWeights, config.AssetTransactionFraction)
+ case applicationTx:
+ gen.transactionWeights = append(gen.transactionWeights, config.AppTransactionFraction)
+
}
}
+ if _, valid, err := validateSumCloseToOne(asPtrSlice(gen.transactionWeights)); err != nil || !valid {
+ return gen, fmt.Errorf("invalid transaction config - bad txn distribution valid=%t: %w", valid, err)
+ }
for _, val := range getPaymentTxOptions() {
switch val {
- case paymentTx:
- gen.payTxWeights = append(gen.payTxWeights, config.PaymentFraction)
case paymentAcctCreateTx:
gen.payTxWeights = append(gen.payTxWeights, config.PaymentNewAccountFraction)
+ case paymentPayTx:
+ gen.payTxWeights = append(gen.payTxWeights, config.PaymentFraction)
}
}
+ if _, valid, err := validateSumCloseToOne(asPtrSlice(gen.payTxWeights)); err != nil || !valid {
+ return gen, fmt.Errorf("invalid payment config - bad txn distribution valid=%t: %w", valid, err)
+ }
for _, val := range getAssetTxOptions() {
switch val {
@@ -177,119 +148,65 @@ func MakeGenerator(dbround uint64, bkGenesis bookkeeping.Genesis, config Generat
gen.assetTxWeights = append(gen.assetTxWeights, config.AssetCloseFraction)
}
}
+ if _, valid, err := validateSumCloseToOne(asPtrSlice(gen.assetTxWeights)); err != nil || !valid {
+ return gen, fmt.Errorf("invalid asset config - bad txn distribution valid=%t: %w", valid, err)
+ }
- return gen, nil
-}
-
-// Generator is the interface needed to generate blocks.
-type Generator interface {
- WriteReport(output io.Writer) error
- WriteGenesis(output io.Writer) error
- WriteBlock(output io.Writer, round uint64) error
- WriteAccount(output io.Writer, accountString string) error
- WriteStatus(output io.Writer) error
- WriteDeltas(output io.Writer, round uint64) error
- Accounts() <-chan basics.Address
- Stop()
-}
-
-type generator struct {
- config GenerationConfig
-
- // payment transaction metadata
- numPayments uint64
-
- // Number of algorand accounts
- numAccounts uint64
-
- // Block stuff
- round uint64
- txnCounter uint64
- prevBlockHash string
- timestamp int64
- protocol protocol.ConsensusVersion
- params cconfig.ConsensusParams
- genesis bookkeeping.Genesis
- genesisID string
- genesisHash crypto.Digest
-
- // Rewards stuff
- feeSink basics.Address
- rewardsPool basics.Address
- rewardsLevel uint64
- rewardsResidue uint64
- rewardsRate uint64
- rewardsRecalculationRound uint64
-
- // balances for all accounts. To avoid crypto and reduce storage, accounts are faked.
- // The account is based on the index into the balances array.
- balances []uint64
-
- // assets is a minimal representation of the asset holdings, it doesn't
- // include the frozen state.
- assets []*assetData
- // pendingAssets is used to hold newly created assets so that they are not used before
- // being created.
- pendingAssets []*assetData
-
- transactionWeights []float32
- payTxWeights []float32
- assetTxWeights []float32
-
- // Reporting information from transaction type to data
- reportData Report
-
- // ledger
- ledger *ledger.Ledger
-
- roundOffset uint64
-}
+ for _, val := range getAppTxOptions() {
+ switch val {
+ case appSwapCreate:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppSwapFraction*config.AppSwapCreateFraction)
+ case appSwapUpdate:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppSwapFraction*config.AppSwapUpdateFraction)
+ case appSwapDelete:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppSwapFraction*config.AppSwapDeleteFraction)
+ case appSwapOptin:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppSwapFraction*config.AppSwapOptinFraction)
+ case appSwapCall:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppSwapFraction*config.AppSwapCallFraction)
+ case appSwapClose:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppSwapFraction*config.AppSwapCloseFraction)
+ case appSwapClear:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppSwapFraction*config.AppSwapClearFraction)
+ case appBoxesCreate:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppBoxesFraction*config.AppBoxesCreateFraction)
+ case appBoxesUpdate:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppBoxesFraction*config.AppBoxesUpdateFraction)
+ case appBoxesDelete:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppBoxesFraction*config.AppBoxesDeleteFraction)
+ case appBoxesOptin:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppBoxesFraction*config.AppBoxesOptinFraction)
+ case appBoxesCall:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppBoxesFraction*config.AppBoxesCallFraction)
+ case appBoxesClose:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppBoxesFraction*config.AppBoxesCloseFraction)
+ case appBoxesClear:
+ gen.appTxWeights = append(gen.appTxWeights, config.AppBoxesFraction*config.AppBoxesClearFraction)
+ }
+ }
+ if _, valid, err := validateSumCloseToOne(asPtrSlice(gen.appTxWeights)); err != nil || !valid {
+ return gen, fmt.Errorf("invalid app config - bad txn distribution valid=%t: %w", valid, err)
+ }
-type assetData struct {
- assetID uint64
- creator uint64
- name string
- // Holding at index 0 is the creator.
- holdings []*assetHolding
- // Set of holders in the holdings array for easy reference.
- holders map[uint64]*assetHolding
+ return gen, nil
}
-type assetHolding struct {
- acctIndex uint64
- balance uint64
+// initializeAccounting creates the genesis accounts.
+func (g *generator) initializeAccounting() {
+ g.numPayments = 0
+ g.numAccounts = g.config.NumGenesisAccounts
+ for i := uint64(0); i < g.config.NumGenesisAccounts; i++ {
+ g.balances = append(g.balances, g.config.GenesisAccountInitialBalance)
+ }
}
-// Report is the generation report.
-type Report map[TxTypeID]TxData
-
-// TxData is the generator report data.
-type TxData struct {
- GenerationTime time.Duration `json:"generation_time_milli"`
- GenerationCount uint64 `json:"num_generated"`
-}
-
-func track(id TxTypeID) (TxTypeID, time.Time) {
- return id, time.Now()
-}
-func (g *generator) recordData(id TxTypeID, start time.Time) {
- data := g.reportData[id]
- data.GenerationCount++
- data.GenerationTime += time.Since(start)
- g.reportData[id] = data
-}
+// ---- implement Generator interface ----
func (g *generator) WriteReport(output io.Writer) error {
return json.NewEncoder(output).Encode(g.reportData)
}
-func (g *generator) WriteStatus(output io.Writer) error {
- response := model.NodeStatusResponse{
- LastRound: g.round + g.roundOffset,
- }
- return json.NewEncoder(output).Encode(response)
-}
-
+// WriteGenesis writes the genesis file and advances the round.
func (g *generator) WriteGenesis(output io.Writer) error {
defer g.recordData(track(genesis))
@@ -305,7 +222,7 @@ func (g *generator) WriteGenesis(output io.Writer) error {
addr := indexToAccount(i)
allocations = append(allocations, bookkeeping.GenesisAllocation{
Address: addr.String(),
- State: basics.AccountData{
+ State: bookkeeping.GenesisAccountData{
MicroAlgos: basics.MicroAlgos{Raw: g.config.GenesisAccountInitialBalance},
},
})
@@ -315,7 +232,7 @@ func (g *generator) WriteGenesis(output io.Writer) error {
allocations = append(allocations, bookkeeping.GenesisAllocation{
Address: g.rewardsPool.String(),
Comment: "RewardsPool",
- State: basics.AccountData{
+ State: bookkeeping.GenesisAccountData{
MicroAlgos: basics.MicroAlgos{Raw: g.params.MinBalance},
Status: basics.NotParticipating,
},
@@ -335,141 +252,184 @@ func (g *generator) WriteGenesis(output io.Writer) error {
return err
}
-func getTransactionOptions() []interface{} {
- return []interface{}{paymentTx, assetTx}
-}
-
-func (g *generator) generateTransaction(round uint64, intra uint64) (transactions.SignedTxn, transactions.ApplyData, error) {
- selection, err := weightedSelection(g.transactionWeights, getTransactionOptions(), paymentTx)
- if err != nil {
- return transactions.SignedTxn{}, transactions.ApplyData{}, err
- }
-
- switch selection {
- case paymentTx:
- return g.generatePaymentTxn(round, intra)
- case assetTx:
- return g.generateAssetTxn(round, intra)
- default:
- return transactions.SignedTxn{}, transactions.ApplyData{}, fmt.Errorf("no generator available for %s", selection)
- }
-}
-
-func (g *generator) txnForRound(round uint64) uint64 {
- // There are no transactions in the 0th round
- if round == 0 {
- return 0
- }
- return g.config.TxnPerBlock
-}
-
-// finishRound tells the generator it can apply any pending state.
-func (g *generator) finishRound(txnCount uint64) {
- g.txnCounter += txnCount
-
- g.timestamp += consensusTimeMilli
- g.round++
-
- // Apply pending assets...
- g.assets = append(g.assets, g.pendingAssets...)
- g.pendingAssets = nil
-}
-
// WriteBlock generates a block full of new transactions and writes it to the writer.
+// The most recent round is cached, allowing requests to the same round multiple times.
+// This is motivated by the fact that Conduit's logic requests the initial round during
+// its Init() for catchup purposes, and once again when it starts ingesting blocks.
+// There are a few constraints on the generator arising from the fact that
+// blocks must be generated sequentially and that a fixed offset between the
+// database round and the generator round is presumed:
+// - requested round < offset ---> error
+// - requested round == offset: the generator will provide a genesis block or offset block
+// - requested round == generator's round + offset ---> generate a block,
+// advance the round, and cache the block in case of repeated requests.
+// - requested round == generator's round + offset - 1 ---> write the cached block
+// but do not advance the round.
+// - requested round < generator's round + offset - 1 ---> error
+//
+// NOTE: nextRound represents the generator's expectations about the next database round.
func (g *generator) WriteBlock(output io.Writer, round uint64) error {
if round < g.roundOffset {
return fmt.Errorf("cannot generate block for round %d, already in database", round)
}
- if round-g.roundOffset != g.round {
- return fmt.Errorf("generator only supports sequential block access. Expected %d but received request for %d", g.round+g.roundOffset, round)
+
+ nextRound := g.round + g.roundOffset
+ cachedRound := nextRound - 1
+
+ if round != nextRound && round != cachedRound {
+ return fmt.Errorf(
+ "generator only supports sequential block access. Expected %d or %d but received request for %d",
+ cachedRound,
+ nextRound,
+ round,
+ )
}
- numTxnForBlock := g.txnForRound(g.round)
+ // round must either be nextRound or cachedRound
- // return genesis block. offset round for non-empty database
- if round-g.roundOffset == 0 {
- // write the msgpack bytes for a block
- block, _, _ := g.ledger.BlockCert(basics.Round(round - g.roundOffset))
- // return the block with the requested round number
- block.BlockHeader.Round = basics.Round(round)
- encodedblock := rpcs.EncodedBlockCert{Block: block}
- blk := protocol.EncodeMsgp(&encodedblock)
- // write the msgpack bytes for a block
- _, err := output.Write(blk)
- if err != nil {
- return err
+ if round == cachedRound {
+ // one round behind, so write the cached block (if non-empty)
+ fmt.Printf("Received round request %d, but nextRound=%d. Not finishing round.\n", round, nextRound)
+ if len(g.latestBlockMsgp) != 0 {
+ // write the msgpack bytes for a block
+ _, err := output.Write(g.latestBlockMsgp)
+ if err != nil {
+ return err
+ }
}
- g.finishRound(numTxnForBlock)
return nil
}
+ // round == nextRound case
- header := bookkeeping.BlockHeader{
- Round: basics.Round(g.round),
- Branch: bookkeeping.BlockHash{},
- Seed: committee.Seed{},
- TxnCommitments: bookkeeping.TxnCommitments{NativeSha512_256Commitment: crypto.Digest{}},
- TimeStamp: g.timestamp,
- GenesisID: g.genesisID,
- GenesisHash: g.genesisHash,
- RewardsState: bookkeeping.RewardsState{
- FeeSink: g.feeSink,
- RewardsPool: g.rewardsPool,
- RewardsLevel: 0,
- RewardsRate: 0,
- RewardsResidue: 0,
- RewardsRecalculationRound: 0,
- },
- UpgradeState: bookkeeping.UpgradeState{
- CurrentProtocol: g.protocol,
- },
- UpgradeVote: bookkeeping.UpgradeVote{},
- TxnCounter: g.txnCounter + numTxnForBlock,
- StateProofTracking: nil,
+ err := g.startRound()
+ if err != nil {
+ return err
+ }
+ if g.round == 0 {
+ fmt.Printf("starting txnCounter: %d\n", g.txnCounter)
}
+ minTxnsForBlock := g.minTxnsForBlock(g.round)
- // Generate the transactions
- transactions := make([]transactions.SignedTxnInBlock, 0, numTxnForBlock)
+ var cert rpcs.EncodedBlockCert
+ if g.round == 0 {
+ // we'll write genesis block / offset round for non-empty database
+ cert.Block, _, _ = g.ledger.BlockCert(basics.Round(round - g.roundOffset))
+ } else {
+ g.setBlockHeader(&cert)
+
+ intra := uint64(0)
+ txGroupsAD := [][]txn.SignedTxnWithAD{}
+ for intra < minTxnsForBlock {
+ txGroupAD, numTxns, err := g.generateTxGroup(g.round, intra)
+ if err != nil {
+ return fmt.Errorf("failed to generate transaction: %w", err)
+ }
+ if len(txGroupAD) == 0 {
+ return fmt.Errorf("failed to generate transaction: no transactions given")
+ }
+ txGroupsAD = append(txGroupsAD, txGroupAD)
- for i := uint64(0); i < numTxnForBlock; i++ {
- txn, ad, err := g.generateTransaction(g.round, i)
+ intra += numTxns
+ }
+
+ vBlock, ledgerTxnCount, err := g.evaluateBlock(cert.Block.BlockHeader, txGroupsAD, int(intra))
if err != nil {
- panic(fmt.Sprintf("failed to generate transaction: %v\n", err))
+ return fmt.Errorf("failed to evaluate block: %w", err)
+ }
+ if ledgerTxnCount != g.txnCounter + intra {
+ return fmt.Errorf("evaluateBlock() txn count mismatches theoretical intra: %d != %d", ledgerTxnCount, g.txnCounter + intra)
}
- stib, err := header.EncodeSignedTxn(txn, ad)
+
+ err = g.ledger.AddValidatedBlock(*vBlock, cert.Certificate)
if err != nil {
- panic(fmt.Sprintf("failed to encode transaction: %v\n", err))
+ return fmt.Errorf("failed to add validated block: %w", err)
}
- transactions = append(transactions, stib)
- }
- if numTxnForBlock != uint64(len(transactions)) {
- panic("Unexpected number of transactions.")
- }
+ cert.Block.Payset = vBlock.Block().Payset
- cert := rpcs.EncodedBlockCert{
- Block: bookkeeping.Block{
- BlockHeader: header,
- Payset: transactions,
- },
- Certificate: agreement.Certificate{},
+ if g.verbose {
+ errs := g.introspectLedgerVsGenerator(g.round, intra)
+ if len(errs) > 0 {
+ return fmt.Errorf("introspectLedgerVsGenerator: %w", errors.Join(errs...))
+ }
+ }
}
+ cert.Block.BlockHeader.Round = basics.Round(round)
- err := g.ledger.AddBlock(cert.Block, cert.Certificate)
+ // write the msgpack bytes for a block
+ g.latestBlockMsgp = protocol.EncodeMsgp(&cert)
+ _, err = output.Write(g.latestBlockMsgp)
if err != nil {
return err
}
- // return the block with the requested round number
- cert.Block.BlockHeader.Round = basics.Round(round)
- block := protocol.EncodeMsgp(&cert)
+
+ g.finishRound()
+ return nil
+}
+
+func (g *generator) WriteAccount(output io.Writer, accountString string) error {
+ addr, err := basics.UnmarshalChecksumAddress(accountString)
if err != nil {
- return err
+ return fmt.Errorf("failed to unmarshal address: %w", err)
}
- // write the msgpack bytes for a block
- _, err = output.Write(block)
- if err != nil {
- return err
+
+ idx := accountToIndex(addr)
+
+ // Asset Holdings
+ assets := make([]model.AssetHolding, 0)
+ createdAssets := make([]model.Asset, 0)
+ for _, a := range g.assets {
+ // holdings
+ if holding := a.holders[idx]; holding != nil {
+ assets = append(assets, model.AssetHolding{
+ Amount: holding.balance,
+ AssetID: a.assetID,
+ IsFrozen: false,
+ })
+ }
+ // creator
+ if len(a.holdings) > 0 && a.holdings[0].acctIndex == idx {
+ nameBytes := []byte(a.name)
+ asset := model.Asset{
+ Index: a.assetID,
+ Params: model.AssetParams{
+ Creator: accountString,
+ Decimals: 0,
+ Clawback: &accountString,
+ Freeze: &accountString,
+ Manager: &accountString,
+ Reserve: &accountString,
+ Name: &a.name,
+ NameB64: &nameBytes,
+ Total: assetTotal,
+ },
+ }
+ asset.Params.DefaultFrozen = new(bool)
+ *(asset.Params.DefaultFrozen) = false
+ createdAssets = append(createdAssets, asset)
+ }
}
- g.finishRound(numTxnForBlock)
- return nil
+
+ data := model.Account{
+ Address: accountString,
+ Amount: g.balances[idx],
+ AmountWithoutPendingRewards: g.balances[idx],
+ AppsLocalState: nil,
+ AppsTotalExtraPages: nil,
+ AppsTotalSchema: nil,
+ Assets: &assets,
+ AuthAddr: nil,
+ CreatedApps: nil,
+ CreatedAssets: &createdAssets,
+ Participation: nil,
+ PendingRewards: 0,
+ RewardBase: nil,
+ Rewards: 0,
+ Round: g.round - 1,
+ SigType: nil,
+ Status: "Offline",
+ }
+
+ return json.NewEncoder(output).Encode(data)
}
// WriteDeltas generates returns the deltas for payset.
@@ -499,49 +459,104 @@ func (g *generator) WriteDeltas(output io.Writer, round uint64) error {
return nil
}
-// initializeAccounting creates the genesis accounts.
-func (g *generator) initializeAccounting() {
- if g.config.NumGenesisAccounts == 0 {
- panic("Number of genesis accounts must be > 0.")
+func (g *generator) WriteStatus(output io.Writer) error {
+ response := model.NodeStatusResponse{
+ LastRound: g.round + g.roundOffset,
}
+ return json.NewEncoder(output).Encode(response)
+}
- g.numPayments = 0
- g.numAccounts = g.config.NumGenesisAccounts
- for i := uint64(0); i < g.config.NumGenesisAccounts; i++ {
- g.balances = append(g.balances, g.config.GenesisAccountInitialBalance)
+// Stop cleans up allocated resources.
+func (g *generator) Stop() {
+ g.ledger.Close()
+}
+
+// ---- transaction options vectors ----
+
+func getTransactionOptions() []interface{} {
+ return []interface{}{paymentTx, assetTx, applicationTx}
+}
+
+func getPaymentTxOptions() []interface{} {
+ return []interface{}{paymentAcctCreateTx, paymentPayTx}
+}
+
+func getAssetTxOptions() []interface{} {
+ return []interface{}{assetCreate, assetDestroy, assetOptin, assetClose, assetXfer}
+}
+
+func getAppTxOptions() []interface{} {
+ return []interface{}{
+ appSwapCreate, appSwapUpdate, appSwapDelete, appSwapOptin, appSwapCall, appSwapClose, appSwapClear,
+ appBoxesCreate, appBoxesUpdate, appBoxesDelete, appBoxesOptin, appBoxesCall, appBoxesClose, appBoxesClear,
}
}
-func signTxn(txn transactions.Transaction) transactions.SignedTxn {
- stxn := transactions.SignedTxn{
- Sig: crypto.Signature{},
- Msig: crypto.MultisigSig{},
- Lsig: transactions.LogicSig{},
- Txn: txn,
- AuthAddr: basics.Address{},
+// ---- Transaction Generation (Pay/Asset/Apps) ----
+
+func (g *generator) generateTxGroup(round uint64, intra uint64) ([]txn.SignedTxnWithAD, uint64 /* numTxns */, error) {
+ selection, err := weightedSelection(g.transactionWeights, getTransactionOptions(), paymentTx)
+ if err != nil {
+ return nil, 0, err
}
- // TODO: Would it be useful to generate a random signature?
- stxn.Sig[32] = 50
+ var signedTxns []txn.SignedTxn
+ var numTxns uint64
+ var expectedID uint64
+ switch selection {
+ case paymentTx:
+ var signedTxn txn.SignedTxn
+ signedTxn, numTxns, err = g.generatePaymentTxn(round, intra)
+ signedTxns = []txn.SignedTxn{signedTxn}
+ case assetTx:
+ var signedTxn txn.SignedTxn
+ signedTxn, numTxns, expectedID, err = g.generateAssetTxn(round, intra)
+ signedTxns = []txn.SignedTxn{signedTxn}
+ case applicationTx:
+ signedTxns, numTxns, expectedID, err = g.generateAppTxn(round, intra)
+ default:
+ return nil, 0, fmt.Errorf("no generator available for %s", selection)
+ }
- return stxn
-}
+ if err != nil {
+ return nil, numTxns, fmt.Errorf("error generating transaction: %w", err)
+ }
-func getPaymentTxOptions() []interface{} {
- return []interface{}{paymentTx, paymentAcctCreateTx}
+ if len(signedTxns) == 0 {
+ return nil, numTxns, fmt.Errorf("this should never happen! no transactions generated")
+ }
+
+ txnGroupAD := make([]txn.SignedTxnWithAD, len(signedTxns))
+ for i := range signedTxns {
+ txnGroupAD[i] = txn.SignedTxnWithAD{SignedTxn: signedTxns[i]}
+
+ // for debugging:
+ g.latestPaysetWithExpectedID = append(
+ g.latestPaysetWithExpectedID,
+ txnWithExpectedID{
+ expectedID: expectedID,
+ signedTxn: &signedTxns[i],
+ intra: intra,
+ nextIntra: intra + numTxns,
+ },
+ )
+ }
+ return txnGroupAD, numTxns, nil
}
+// ---- 1. Pay Transactions ----
+
// generatePaymentTxn creates a new payment transaction. The sender is always a genesis account, the receiver is random,
// or a new account.
-func (g *generator) generatePaymentTxn(round uint64, intra uint64) (transactions.SignedTxn, transactions.ApplyData, error) {
- selection, err := weightedSelection(g.payTxWeights, getPaymentTxOptions(), paymentTx)
+func (g *generator) generatePaymentTxn(round uint64, intra uint64) (txn.SignedTxn, uint64 /* numTxns */, error) {
+ selection, err := weightedSelection(g.payTxWeights, getPaymentTxOptions(), paymentPayTx)
if err != nil {
- return transactions.SignedTxn{}, transactions.ApplyData{}, err
+ return txn.SignedTxn{}, 0, err
}
return g.generatePaymentTxnInternal(selection.(TxTypeID), round, intra)
}
-func (g *generator) generatePaymentTxnInternal(selection TxTypeID, round uint64, intra uint64) (transactions.SignedTxn, transactions.ApplyData, error) {
+func (g *generator) generatePaymentTxnInternal(selection TxTypeID, round uint64, intra uint64) (txn.SignedTxn, uint64 /* numTxns */, error) {
defer g.recordData(track(selection))
minBal := g.params.MinBalance
@@ -551,7 +566,7 @@ func (g *generator) generatePaymentTxnInternal(selection TxTypeID, round uint64,
// Select a receiver
var receiveIndex uint64
switch selection {
- case paymentTx:
+ case paymentPayTx:
receiveIndex = rand.Uint64() % g.numAccounts
case paymentAcctCreateTx:
// give new accounts get extra algos for sending other transactions
@@ -577,34 +592,50 @@ func (g *generator) generatePaymentTxnInternal(selection TxTypeID, round uint64,
g.numPayments++
- txn := g.makePaymentTxn(g.makeTxnHeader(sender, round, intra), receiver, amount, basics.Address{})
- return signTxn(txn), transactions.ApplyData{}, nil
+ transaction := g.makePaymentTxn(g.makeTxnHeader(sender, round, intra), receiver, amount, basics.Address{})
+ return signTxn(transaction), 1, nil
}
-func getAssetTxOptions() []interface{} {
- return []interface{}{assetCreate, assetDestroy, assetOptin, assetXfer, assetClose}
+// ---- 2. Asset Transactions ----
+
+func (g *generator) generateAssetTxn(round uint64, intra uint64) (txn.SignedTxn, uint64 /* numTxns */, uint64 /* assetID */, error) {
+ start := time.Now()
+ selection, err := weightedSelection(g.assetTxWeights, getAssetTxOptions(), assetXfer)
+ if err != nil {
+ return txn.SignedTxn{}, 0, 0, err
+ }
+
+ actual, transaction, assetID := g.generateAssetTxnInternal(selection.(TxTypeID), round, intra)
+ defer g.recordData(actual, start)
+
+ if transaction.Type == "" {
+ fmt.Println("Empty asset transaction.")
+ os.Exit(1)
+ }
+
+ return signTxn(transaction), 1, assetID, nil
}
-func (g *generator) generateAssetTxnInternal(txType TxTypeID, round uint64, intra uint64) (actual TxTypeID, txn transactions.Transaction) {
+func (g *generator) generateAssetTxnInternal(txType TxTypeID, round uint64, intra uint64) (actual TxTypeID, txn txn.Transaction, assetID uint64) {
return g.generateAssetTxnInternalHint(txType, round, intra, 0, nil)
}
-func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round uint64, intra uint64, hintIndex uint64, hint *assetData) (actual TxTypeID, txn transactions.Transaction) {
+func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round uint64, intra uint64, hintIndex uint64, hint *assetData) (actual TxTypeID, txn txn.Transaction, assetID uint64) {
actual = txType
// If there are no assets the next operation needs to be a create.
- if len(g.assets) == 0 {
+ numAssets := uint64(len(g.assets))
+
+ if numAssets == 0 {
actual = assetCreate
}
-
- numAssets := uint64(len(g.assets))
var senderIndex uint64
if actual == assetCreate {
- numAssets = uint64(len(g.assets)) + uint64(len(g.pendingAssets))
+ numAssets += uint64(len(g.pendingAssets))
senderIndex = numAssets % g.config.NumGenesisAccounts
senderAcct := indexToAccount(senderIndex)
total := assetTotal
- assetID := g.txnCounter + intra + 1
+ assetID = g.txnCounter + intra + 1
assetName := fmt.Sprintf("asset #%d", assetID)
txn = g.makeAssetCreateTxn(g.makeTxnHeader(senderAcct, round, intra), total, false, assetName)
// Compute asset ID and initialize holdings
@@ -622,11 +653,14 @@ func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round uint64,
g.pendingAssets = append(g.pendingAssets, &a)
} else {
- assetIndex := rand.Uint64() % numAssets
- asset := g.assets[assetIndex]
+ var assetIndex uint64
+ var asset *assetData
if hint != nil {
assetIndex = hintIndex
asset = hint
+ } else {
+ assetIndex = rand.Uint64() % numAssets
+ asset = g.assets[assetIndex]
}
switch actual {
@@ -640,7 +674,8 @@ func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round uint64,
senderIndex = asset.creator
creator := indexToAccount(senderIndex)
- txn = g.makeAssetDestroyTxn(g.makeTxnHeader(creator, round, intra), asset.assetID)
+ assetID = asset.assetID
+ txn = g.makeAssetDestroyTxn(g.makeTxnHeader(creator, round, intra), assetID)
// Remove asset by moving the last element to the deleted index then trimming the slice.
g.assets[assetIndex] = g.assets[numAssets-1]
@@ -660,7 +695,8 @@ func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round uint64,
exists = asset.holders[senderIndex] != nil
}
account := indexToAccount(senderIndex)
- txn = g.makeAssetAcceptanceTxn(g.makeTxnHeader(account, round, intra), asset.assetID)
+ assetID = asset.assetID
+ txn = g.makeAssetAcceptanceTxn(g.makeTxnHeader(account, round, intra), assetID)
holding := assetHolding{
acctIndex: senderIndex,
@@ -683,14 +719,15 @@ func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round uint64,
receiver := indexToAccount(asset.holdings[receiverArrayIndex].acctIndex)
amount := uint64(10)
- txn = g.makeAssetTransferTxn(g.makeTxnHeader(sender, round, intra), receiver, amount, basics.Address{}, asset.assetID)
+ assetID = asset.assetID
+ txn = g.makeAssetTransferTxn(g.makeTxnHeader(sender, round, intra), receiver, amount, basics.Address{}, assetID)
if asset.holdings[0].balance < amount {
- fmt.Printf("\n\ncreator doesn't have enough funds for asset %d\n\n", asset.assetID)
+ fmt.Printf("\n\ncreator doesn't have enough funds for asset %d\n\n", assetID)
os.Exit(1)
}
if g.balances[asset.holdings[0].acctIndex] < g.params.MinTxnFee {
- fmt.Printf("\n\ncreator doesn't have enough funds for transaction %d\n\n", asset.assetID)
+ fmt.Printf("\n\ncreator doesn't have enough funds for transaction %d\n\n", assetID)
os.Exit(1)
}
@@ -712,8 +749,9 @@ func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round uint64,
closeToAcctIndex := asset.holdings[0].acctIndex
closeToAcct := indexToAccount(closeToAcctIndex)
+ assetID = asset.assetID
txn = g.makeAssetTransferTxn(
- g.makeTxnHeader(sender, round, intra), closeToAcct, 0, closeToAcct, asset.assetID)
+ g.makeTxnHeader(sender, round, intra), closeToAcct, 0, closeToAcct, assetID)
asset.holdings[0].balance += asset.holdings[closeIndex].balance
@@ -734,137 +772,55 @@ func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round uint64,
fmt.Printf("\n\nthe sender account does not have enough algos for the transfer. idx %d, asset transaction type %v, num %d\n\n", senderIndex, actual, g.reportData[actual].GenerationCount)
os.Exit(1)
}
- g.balances[senderIndex] -= txn.Fee.ToUint64()
- return
-}
-func (g *generator) generateAssetTxn(round uint64, intra uint64) (transactions.SignedTxn, transactions.ApplyData, error) {
- start := time.Now()
- selection, err := weightedSelection(g.assetTxWeights, getAssetTxOptions(), assetXfer)
- if err != nil {
- return transactions.SignedTxn{}, transactions.ApplyData{}, err
- }
-
- actual, txn := g.generateAssetTxnInternal(selection.(TxTypeID), round, intra)
- defer g.recordData(actual, start)
-
- if txn.Type == "" {
- fmt.Println("Empty asset transaction.")
+ if assetID == 0 {
+ fmt.Printf("\n\nthis should never happen: assetID is 0 but should have been set by \ngenerateAssetTxnInternalHint(txType=%s, round=%d, intra=%d, hintIndex=%d, hintIsNil=%t)\nactual=%s\n\n",
+ txType, round, intra, hintIndex, hint == nil, actual)
os.Exit(1)
}
- return signTxn(txn), transactions.ApplyData{}, nil
-}
+ g.balances[senderIndex] -= txn.Fee.ToUint64()
-func (g *generator) initializeLedger() {
- genBal := convertToGenesisBalances(g.balances)
- // add rewards pool with min balance
- genBal[g.rewardsPool] = basics.AccountData{
- MicroAlgos: basics.MicroAlgos{Raw: g.params.MinBalance},
- }
- bal := bookkeeping.MakeGenesisBalances(genBal, g.feeSink, g.rewardsPool)
- block, err := bookkeeping.MakeGenesisBlock(g.protocol, bal, g.genesisID, g.genesisHash)
- if err != nil {
- fmt.Printf("error making genesis: %v\n.", err)
- os.Exit(1)
- }
- var prefix string
- if g.genesisID == "" {
- prefix = "block-generator"
- } else {
- prefix = g.genesisID
- }
- l, err := ledger.OpenLedger(logging.Base(), prefix, true, ledgercore.InitState{
- Block: block,
- Accounts: bal.Balances,
- GenesisHash: g.genesisHash,
- }, cconfig.GetDefaultLocal())
- if err != nil {
- fmt.Printf("error initializing ledger: %v\n.", err)
- os.Exit(1)
- }
- g.ledger = l
+ return
}
-// Stop cleans up allocated resources.
-func (g *generator) Stop() {
- g.ledger.Close()
+// ---- metric data recorders ----
+
+func track(id TxTypeID) (TxTypeID, time.Time) {
+ return id, time.Now()
}
-func (g *generator) WriteAccount(output io.Writer, accountString string) error {
- addr, err := basics.UnmarshalChecksumAddress(accountString)
- if err != nil {
- return fmt.Errorf("failed to unmarshal address: %w", err)
- }
+func (g *generator) recordData(id TxTypeID, start time.Time) {
+ g.latestData[id]++
+ data := g.reportData[id]
+ data.GenerationCount += 1
+ data.GenerationTime += time.Since(start)
+ g.reportData[id] = data
+}
- idx := accountToIndex(addr)
+// ---- sign transactions ----
- // Asset Holdings
- assets := make([]model.AssetHolding, 0)
- createdAssets := make([]model.Asset, 0)
- for _, a := range g.assets {
- // holdings
- if holding := a.holders[idx]; holding != nil {
- assets = append(assets, model.AssetHolding{
- Amount: holding.balance,
- AssetID: a.assetID,
- IsFrozen: false,
- })
- }
- // creator
- if len(a.holdings) > 0 && a.holdings[0].acctIndex == idx {
- nameBytes := []byte(a.name)
- asset := model.Asset{
- Index: a.assetID,
- Params: model.AssetParams{
- Creator: accountString,
- Decimals: 0,
- Clawback: &accountString,
- Freeze: &accountString,
- Manager: &accountString,
- Reserve: &accountString,
- Name: &a.name,
- NameB64: &nameBytes,
- Total: assetTotal,
- },
- }
- asset.Params.DefaultFrozen = new(bool)
- *(asset.Params.DefaultFrozen) = false
- createdAssets = append(createdAssets, asset)
- }
+func signTxn(transaction txn.Transaction) txn.SignedTxn {
+ stxn := txn.SignedTxn{
+ Msig: crypto.MultisigSig{},
+ Lsig: txn.LogicSig{},
+ Txn: transaction,
+ AuthAddr: basics.Address{},
}
- data := model.Account{
- Address: accountString,
- Amount: g.balances[idx],
- AmountWithoutPendingRewards: g.balances[idx],
- AppsLocalState: nil,
- AppsTotalExtraPages: nil,
- AppsTotalSchema: nil,
- Assets: &assets,
- AuthAddr: nil,
- CreatedApps: nil,
- CreatedAssets: &createdAssets,
- Participation: nil,
- PendingRewards: 0,
- RewardBase: nil,
- Rewards: 0,
- Round: g.round - 1,
- SigType: nil,
- Status: "Offline",
- }
+ addSignature(&stxn)
- return json.NewEncoder(output).Encode(data)
+ return stxn
}
-// Accounts is used in the runner to generate a list of addresses.
-func (g *generator) Accounts() <-chan basics.Address {
- results := make(chan basics.Address, 10)
- go func() {
- defer close(results)
- for i := uint64(0); i < g.numAccounts; i++ {
- results <- indexToAccount(i)
- }
- }()
- return results
+func addSignature(stxn *txn.SignedTxn) {
+ stxn.Sig = crypto.Signature{}
+ // TODO: Would it be useful to generate a random signature?
+ stxn.Sig[32] = 50
+}
+
+func reSignTxns(signedTxns []txn.SignedTxn) {
+ for i := range signedTxns {
+ addSignature(&signedTxns[i])
+ }
}
diff --git a/tools/block-generator/generator/generate_apps.go b/tools/block-generator/generator/generate_apps.go
new file mode 100644
index 000000000..5ecac947c
--- /dev/null
+++ b/tools/block-generator/generator/generate_apps.go
@@ -0,0 +1,259 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "fmt"
+ "math/rand"
+ "time"
+
+ txn "github.com/algorand/go-algorand/data/transactions"
+)
+
+// ---- generator app state ----
+
+func (g *generator) resetPendingApps() {
+ g.pendingAppSlice = map[appKind][]*appData{
+ appKindBoxes: make([]*appData, 0),
+ appKindSwap: make([]*appData, 0),
+ }
+ g.pendingAppMap = map[appKind]map[uint64]*appData{
+ appKindBoxes: make(map[uint64]*appData),
+ appKindSwap: make(map[uint64]*appData),
+ }
+}
+
+// ---- effects and consequences ----
+
+// effects is a map that contains the hard-coded non-trivial
+// consequents of a transaction type.
+// The "sibling" transactions are added to an atomic transaction group
+// in a "makeXyzTransaction" function defined in make_transactions.go.
+// The "inner" transactions are created inside the TEAL programs. See:
+// * teal/poap_boxes.teal
+// * teal/swap_amm.teal
+//
+// appBoxesCreate: 1 sibling payment tx
+// appBoxesOptin: 1 sibling payment tx, 2 inner tx
+var effects = map[TxTypeID][]TxEffect{
+ appBoxesCreate: {
+ {effectPaymentTxSibling, 1},
+ },
+ appBoxesOptin: {
+ {effectPaymentTxSibling, 1},
+ {effectInnerTx, 2},
+ },
+}
+
+func countEffects(actual TxTypeID) uint64 {
+ cnt := uint64(0)
+ if effectsFromActual, ok := effects[actual]; ok {
+ for _, effect := range effectsFromActual {
+ cnt += effect.count
+ }
+ }
+ return cnt
+}
+
+func CumulativeEffects(report Report) EffectsReport {
+ effsReport := make(EffectsReport)
+ for txType, data := range report {
+ rootCount := data.GenerationCount
+ effsReport[string(txType)] += rootCount
+ for _, effect := range effects[txType] {
+ effsReport[effect.effect] += effect.count * rootCount
+ }
+ }
+ return effsReport
+}
+
+// ---- 3. App Transactions ----
+
+func (g *generator) generateAppTxn(round uint64, intra uint64) ([]txn.SignedTxn, uint64 /* numTxns */, uint64 /* appID */, error) {
+ start := time.Now()
+ selection, err := weightedSelection(g.appTxWeights, getAppTxOptions(), appSwapCall)
+ if err != nil {
+ return nil, 0, 0, err
+ }
+
+ actual, signedTxns, appID, err := g.generateAppCallInternal(selection.(TxTypeID), round, intra, nil)
+ if err != nil {
+ return nil, 0, appID, fmt.Errorf("unexpected error received from generateAppCallInternal(): %w", err)
+ }
+
+ numTxns := 1 + countEffects(actual) // +1 for actual
+ g.recordData(actual, start)
+ return signedTxns, numTxns, appID, nil
+}
+
+// generateAppCallInternal is the main workhorse for generating app transactions.
+// Senders are always genesis accounts to avoid running out of funds.
+func (g *generator) generateAppCallInternal(txType TxTypeID, round, intra uint64, hintApp *appData) (TxTypeID, []txn.SignedTxn, uint64 /* appID */, error) {
+ var senderIndex uint64
+ if hintApp != nil {
+ senderIndex = hintApp.sender
+ } else {
+ senderIndex = rand.Uint64() % g.config.NumGenesisAccounts
+ }
+ senderAcct := indexToAccount(senderIndex)
+
+ actual, kind, appCallType, appID, err := g.getActualAppCall(txType, senderIndex)
+ if err != nil {
+ return "", nil, appID, err
+ }
+ if hintApp != nil && hintApp.appID != 0 {
+ // can only override the appID when non-zero in hintApp
+ appID = hintApp.appID
+ }
+ // WLOG: the matched cases below are now well-defined thanks to getActualAppCall()
+
+ var signedTxns []txn.SignedTxn
+ switch appCallType {
+ case appTxTypeCreate:
+ appID = g.txnCounter + intra + 1
+ signedTxns = g.makeAppCreateTxn(kind, senderAcct, round, intra, appID)
+ reSignTxns(signedTxns)
+
+ for k := range g.appMap {
+ if g.appMap[k][appID] != nil {
+ return "", nil, appID, fmt.Errorf("should never happen! app %d already exists for kind %s", appID, k)
+ }
+ if g.pendingAppMap[k][appID] != nil {
+ return "", nil, appID, fmt.Errorf("should never happen! app %d already pending for kind %s", appID, k)
+ }
+ }
+
+ ad := &appData{
+ appID: appID,
+ sender: senderIndex,
+ kind: kind,
+ optins: map[uint64]bool{},
+ }
+
+ g.pendingAppSlice[kind] = append(g.pendingAppSlice[kind], ad)
+ g.pendingAppMap[kind][appID] = ad
+
+ case appTxTypeOptin:
+ signedTxns = g.makeAppOptinTxn(senderAcct, round, intra, kind, appID)
+ reSignTxns(signedTxns)
+ if g.pendingAppMap[kind][appID] == nil {
+ ad := &appData{
+ appID: appID,
+ sender: senderIndex,
+ kind: kind,
+ optins: map[uint64]bool{},
+ }
+ g.pendingAppMap[kind][appID] = ad
+ g.pendingAppSlice[kind] = append(g.pendingAppSlice[kind], ad)
+ }
+ g.pendingAppMap[kind][appID].optins[senderIndex] = true
+
+ case appTxTypeCall:
+ signedTxns = []txn.SignedTxn{
+ signTxn(g.makeAppCallTxn(senderAcct, round, intra, appID)),
+ }
+
+ default:
+ return "", nil, appID, fmt.Errorf("unimplemented: invalid transaction type <%s> for app %d", appCallType, appID)
+ }
+
+ return actual, signedTxns, appID, nil
+}
+
+func (g *generator) getAppData(existing bool, kind appKind, senderIndex, appID uint64) (*appData, bool /* appInMap */, bool /* senderOptedin */) {
+ var appMapOrPendingAppMap map[appKind]map[uint64]*appData
+ if existing {
+ appMapOrPendingAppMap = g.appMap
+ } else {
+ appMapOrPendingAppMap = g.pendingAppMap
+ }
+
+ ad, ok := appMapOrPendingAppMap[kind][appID]
+ if !ok {
+ return nil, false, false
+ }
+ if !ad.optins[senderIndex] {
+ return ad, true, false
+ }
+ return ad, true, true
+}
+
+// getActualAppCall returns the actual transaction type, app kind, app transaction type and appID
+// * it returns actual = txType if there aren't any problems (for example create always is kept)
+// * it creates the app if the app of the given kind doesn't exist
+// * it switches to noopoc instead of optin when already opted into existing apps
+// * it switches to create instead of optin when only opted into pending apps
+// * it switches to optin when noopoc if not opted in and follows the logic of the optins above
+// * the appID is 0 for creates, and otherwise a random appID from the existing apps for the kind
+func (g *generator) getActualAppCall(txType TxTypeID, senderIndex uint64) (TxTypeID, appKind, appTxType, uint64 /* appID */, error) {
+ isApp, kind, appTxType, err := parseAppTxType(txType)
+ if err != nil {
+ return "", 0, 0, 0, err
+ }
+ if !isApp {
+ return "", 0, 0, 0, fmt.Errorf("should be an app but not parsed that way: %v", txType)
+ }
+
+ // creates get a quick pass:
+ if appTxType == appTxTypeCreate {
+ return txType, kind, appTxTypeCreate, 0, nil
+ }
+
+ numAppsForKind := uint64(len(g.appSlice[kind]))
+ if numAppsForKind == 0 {
+ // can't do anything else with the app if it doesn't exist, so must create it first
+ return getAppTxType(kind, appTxTypeCreate), kind, appTxTypeCreate, 0, nil
+ }
+
+ if appTxType == appTxTypeOptin {
+ // pick a random app to optin:
+ appID := g.appSlice[kind][rand.Uint64()%numAppsForKind].appID
+
+ _, exists, optedIn := g.getAppData(true /* existing */, kind, senderIndex, appID)
+ if !exists {
+ return txType, kind, appTxType, appID, fmt.Errorf("should never happen! app %d of kind %s does not exist", appID, kind)
+ }
+
+ if optedIn {
+ // already opted in, so call the app instead:
+ return getAppTxType(kind, appTxTypeCall), kind, appTxTypeCall, appID, nil
+ }
+
+ _, _, optedInPending := g.getAppData(false /* pending */, kind, senderIndex, appID)
+ if optedInPending {
+ // about to get opted in, but can't optin twice or call yet, so create:
+ return getAppTxType(kind, appTxTypeCreate), kind, appTxTypeCreate, appID, nil
+ }
+ // not opted in or pending, so optin:
+ return txType, kind, appTxType, appID, nil
+ }
+
+ if appTxType != appTxTypeCall {
+ return "", 0, 0, 0, fmt.Errorf("unimplemented transaction type for app %s from %s", appTxType, txType)
+ }
+ // WLOG appTxTypeCall:
+
+ numAppsOptedin := uint64(len(g.accountAppOptins[kind][senderIndex]))
+ if numAppsOptedin == 0 {
+ // try again calling recursively but attempting to optin:
+ return g.getActualAppCall(getAppTxType(kind, appTxTypeOptin), senderIndex)
+ }
+ // WLOG appTxTypeCall with available optins:
+
+ appID := g.accountAppOptins[kind][senderIndex][rand.Uint64()%numAppsOptedin]
+ return txType, kind, appTxType, appID, nil
+}
diff --git a/tools/block-generator/generator/generate_test.go b/tools/block-generator/generator/generate_test.go
index dddcf1758..4ae79d9e7 100644
--- a/tools/block-generator/generator/generate_test.go
+++ b/tools/block-generator/generator/generate_test.go
@@ -22,10 +22,13 @@ import (
"net/http"
"net/http/httptest"
"testing"
+ "time"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/rpcs"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -33,13 +36,16 @@ import (
)
func makePrivateGenerator(t *testing.T, round uint64, genesis bookkeeping.Genesis) *generator {
- publicGenerator, err := MakeGenerator(round, genesis, GenerationConfig{
+ cfg := GenerationConfig{
+ Name: "test",
NumGenesisAccounts: 10,
GenesisAccountInitialBalance: 1000000000000,
PaymentTransactionFraction: 1.0,
PaymentNewAccountFraction: 1.0,
AssetCreateFraction: 1.0,
- })
+ }
+ cfg.validateWithDefaults(true)
+ publicGenerator, err := MakeGenerator(round, genesis, cfg, true)
require.NoError(t, err)
return publicGenerator.(*generator)
}
@@ -63,7 +69,8 @@ func TestAssetXferNoAssetsOverride(t *testing.T) {
g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
// First asset transaction must create.
- actual, txn := g.generateAssetTxnInternal(assetXfer, 1, 0)
+ actual, txn, assetID := g.generateAssetTxnInternal(assetXfer, 1, 0)
+ require.NotEqual(t, 0, assetID)
require.Equal(t, assetCreate, actual)
require.Equal(t, protocol.AssetConfigTx, txn.Type)
require.Len(t, g.assets, 0)
@@ -75,12 +82,13 @@ func TestAssetXferNoAssetsOverride(t *testing.T) {
func TestAssetXferOneHolderOverride(t *testing.T) {
partitiontest.PartitionTest(t)
g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
- g.finishRound(0)
+ g.finishRound()
g.generateAssetTxnInternal(assetCreate, 1, 0)
- g.finishRound(1)
+ g.finishRound()
// Transfer converted to optin if there is only 1 holder.
- actual, txn := g.generateAssetTxnInternal(assetXfer, 2, 0)
+ actual, txn, assetID := g.generateAssetTxnInternal(assetXfer, 2, 0)
+ require.NotEqual(t, 0, assetID)
require.Equal(t, assetOptin, actual)
require.Equal(t, protocol.AssetTransferTx, txn.Type)
require.Len(t, g.assets, 1)
@@ -92,12 +100,13 @@ func TestAssetXferOneHolderOverride(t *testing.T) {
func TestAssetCloseCreatorOverride(t *testing.T) {
partitiontest.PartitionTest(t)
g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
- g.finishRound(0)
+ g.finishRound()
g.generateAssetTxnInternal(assetCreate, 1, 0)
- g.finishRound(1)
+ g.finishRound()
// Instead of closing the creator, optin a new account
- actual, txn := g.generateAssetTxnInternal(assetClose, 2, 0)
+ actual, txn, assetID := g.generateAssetTxnInternal(assetClose, 2, 0)
+ require.NotEqual(t, 0, assetID)
require.Equal(t, assetOptin, actual)
require.Equal(t, protocol.AssetTransferTx, txn.Type)
require.Len(t, g.assets, 1)
@@ -109,29 +118,32 @@ func TestAssetCloseCreatorOverride(t *testing.T) {
func TestAssetOptinEveryAccountOverride(t *testing.T) {
partitiontest.PartitionTest(t)
g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
- g.finishRound(0)
+ g.finishRound()
g.generateAssetTxnInternal(assetCreate, 1, 0)
- g.finishRound(1)
+ g.finishRound()
// Opt all the accounts in, this also verifies that no account is opted in twice
var txn transactions.Transaction
var actual TxTypeID
+ var assetID uint64
for i := 2; uint64(i) <= g.numAccounts; i++ {
- actual, txn = g.generateAssetTxnInternal(assetOptin, 2, uint64(1+i))
+ actual, txn, assetID = g.generateAssetTxnInternal(assetOptin, 2, uint64(1+i))
+ require.NotEqual(t, 0, assetID)
require.Equal(t, assetOptin, actual)
require.Equal(t, protocol.AssetTransferTx, txn.Type)
require.Len(t, g.assets, 1)
require.Len(t, g.assets[0].holdings, i)
require.Len(t, g.assets[0].holders, i)
}
- g.finishRound(2)
+ g.finishRound()
// All accounts have opted in
require.Equal(t, g.numAccounts, uint64(len(g.assets[0].holdings)))
// The next optin closes instead
- actual, txn = g.generateAssetTxnInternal(assetOptin, 3, 0)
- g.finishRound(3)
+ actual, txn, assetID = g.generateAssetTxnInternal(assetOptin, 3, 0)
+ require.Greater(t, assetID, uint64(0))
+ g.finishRound()
require.Equal(t, assetClose, actual)
require.Equal(t, protocol.AssetTransferTx, txn.Type)
require.Len(t, g.assets, 1)
@@ -142,17 +154,18 @@ func TestAssetOptinEveryAccountOverride(t *testing.T) {
func TestAssetDestroyWithHoldingsOverride(t *testing.T) {
partitiontest.PartitionTest(t)
g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
- g.finishRound(0)
+ g.finishRound()
g.generateAssetTxnInternal(assetCreate, 1, 0)
- g.finishRound(1)
+ g.finishRound()
g.generateAssetTxnInternal(assetOptin, 2, 0)
- g.finishRound(2)
+ g.finishRound()
g.generateAssetTxnInternal(assetXfer, 3, 0)
- g.finishRound(3)
+ g.finishRound()
require.Len(t, g.assets[0].holdings, 2)
require.Len(t, g.assets[0].holders, 2)
- actual, txn := g.generateAssetTxnInternal(assetDestroy, 4, 0)
+ actual, txn, assetID := g.generateAssetTxnInternal(assetDestroy, 4, 0)
+ require.NotEqual(t, 0, assetID)
require.Equal(t, assetClose, actual)
require.Equal(t, protocol.AssetTransferTx, txn.Type)
require.Len(t, g.assets, 1)
@@ -163,31 +176,267 @@ func TestAssetDestroyWithHoldingsOverride(t *testing.T) {
func TestAssetTransfer(t *testing.T) {
partitiontest.PartitionTest(t)
g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
- g.finishRound(0)
+ g.finishRound()
g.generateAssetTxnInternal(assetCreate, 1, 0)
- g.finishRound(1)
+ g.finishRound()
g.generateAssetTxnInternal(assetOptin, 2, 0)
- g.finishRound(2)
+ g.finishRound()
g.generateAssetTxnInternal(assetXfer, 3, 0)
- g.finishRound(3)
- require.Greater(t, g.assets[0].holdings[1].balance, uint64(0))
+ g.finishRound()
+ require.NotEqual(t, g.assets[0].holdings[1].balance, uint64(0))
}
func TestAssetDestroy(t *testing.T) {
partitiontest.PartitionTest(t)
g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
- g.finishRound(0)
+ g.finishRound()
g.generateAssetTxnInternal(assetCreate, 1, 0)
- g.finishRound(1)
+ g.finishRound()
require.Len(t, g.assets, 1)
- actual, txn := g.generateAssetTxnInternal(assetDestroy, 2, 0)
+ actual, txn, assetID := g.generateAssetTxnInternal(assetDestroy, 2, 0)
+ require.NotEqual(t, 0, assetID)
require.Equal(t, assetDestroy, actual)
require.Equal(t, protocol.AssetConfigTx, txn.Type)
require.Len(t, g.assets, 0)
}
+type assembledPrograms struct {
+ boxesApproval []byte
+ boxesClear []byte
+ swapsApproval []byte
+ swapsClear []byte
+}
+
+func assembleApps(t *testing.T) assembledPrograms {
+ t.Helper()
+
+ ap := assembledPrograms{}
+
+ ops, err := logic.AssembleString(approvalBoxes)
+ ap.boxesApproval = ops.Program
+ require.NoError(t, err)
+ ops, err = logic.AssembleString(clearBoxes)
+ ap.boxesClear = ops.Program
+ require.NoError(t, err)
+
+ ops, err = logic.AssembleString(approvalSwap)
+ ap.swapsApproval = ops.Program
+ require.NoError(t, err)
+ ops, err = logic.AssembleString(clearSwap)
+ ap.swapsClear = ops.Program
+ require.NoError(t, err)
+
+ return ap
+}
+
+func TestAppCreate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ assembled := assembleApps(t)
+
+ round, intra := uint64(1337), uint64(0)
+ hint := appData{sender: 7}
+
+ // app call transaction creating appBoxes
+ actual, sgnTxns, appID, err := g.generateAppCallInternal(appBoxesCreate, round, intra, &hint)
+ _ = appID
+ require.NoError(t, err)
+ require.Equal(t, appBoxesCreate, actual)
+
+ require.Len(t, sgnTxns, 2)
+ createTxn := sgnTxns[0].Txn
+
+ require.Equal(t, indexToAccount(hint.sender), createTxn.Sender)
+ require.Equal(t, protocol.ApplicationCallTx, createTxn.Type)
+ require.Equal(t, basics.AppIndex(0), createTxn.ApplicationCallTxnFields.ApplicationID)
+ require.Equal(t, assembled.boxesApproval, createTxn.ApplicationCallTxnFields.ApprovalProgram)
+ require.Equal(t, assembled.boxesClear, createTxn.ApplicationCallTxnFields.ClearStateProgram)
+ require.Equal(t, uint64(32), createTxn.ApplicationCallTxnFields.GlobalStateSchema.NumByteSlice)
+ require.Equal(t, uint64(32), createTxn.ApplicationCallTxnFields.GlobalStateSchema.NumUint)
+ require.Equal(t, uint64(8), createTxn.ApplicationCallTxnFields.LocalStateSchema.NumByteSlice)
+ require.Equal(t, uint64(8), createTxn.ApplicationCallTxnFields.LocalStateSchema.NumUint)
+ require.Equal(t, transactions.NoOpOC, createTxn.ApplicationCallTxnFields.OnCompletion)
+
+ require.Len(t, g.pendingAppSlice[appKindBoxes], 1)
+ require.Len(t, g.pendingAppSlice[appKindSwap], 0)
+ require.Len(t, g.pendingAppMap[appKindBoxes], 1)
+ require.Len(t, g.pendingAppMap[appKindSwap], 0)
+ ad := g.pendingAppSlice[appKindBoxes][0]
+ require.Equal(t, ad, g.pendingAppMap[appKindBoxes][ad.appID])
+ require.Equal(t, hint.sender, ad.sender)
+ require.Equal(t, appKindBoxes, ad.kind)
+ optins := ad.optins
+ require.Len(t, optins, 0)
+
+ paySiblingTxn := sgnTxns[1].Txn
+ require.Equal(t, protocol.PaymentTx, paySiblingTxn.Type)
+
+ // app call transaction creating appSwap
+ intra = 1
+ actual, sgnTxns, appID, err = g.generateAppCallInternal(appSwapCreate, round, intra, &hint)
+ _ = appID
+ require.NoError(t, err)
+ require.Equal(t, appSwapCreate, actual)
+
+ require.Len(t, sgnTxns, 1)
+ createTxn = sgnTxns[0].Txn
+
+ require.Equal(t, protocol.ApplicationCallTx, createTxn.Type)
+ require.Equal(t, indexToAccount(hint.sender), createTxn.Sender)
+ require.Equal(t, basics.AppIndex(0), createTxn.ApplicationCallTxnFields.ApplicationID)
+ require.Equal(t, assembled.swapsApproval, createTxn.ApplicationCallTxnFields.ApprovalProgram)
+ require.Equal(t, assembled.swapsClear, createTxn.ApplicationCallTxnFields.ClearStateProgram)
+ require.Equal(t, uint64(32), createTxn.ApplicationCallTxnFields.GlobalStateSchema.NumByteSlice)
+ require.Equal(t, uint64(32), createTxn.ApplicationCallTxnFields.GlobalStateSchema.NumUint)
+ require.Equal(t, uint64(8), createTxn.ApplicationCallTxnFields.LocalStateSchema.NumByteSlice)
+ require.Equal(t, uint64(8), createTxn.ApplicationCallTxnFields.LocalStateSchema.NumUint)
+ require.Equal(t, transactions.NoOpOC, createTxn.ApplicationCallTxnFields.OnCompletion)
+
+ require.Len(t, g.pendingAppSlice[appKindBoxes], 1)
+ require.Len(t, g.pendingAppSlice[appKindSwap], 1)
+ require.Len(t, g.pendingAppMap[appKindBoxes], 1)
+ require.Len(t, g.pendingAppMap[appKindSwap], 1)
+ ad = g.pendingAppSlice[appKindSwap][0]
+ require.Equal(t, ad, g.pendingAppMap[appKindSwap][ad.appID])
+ require.Equal(t, hint.sender, ad.sender)
+ require.Equal(t, appKindSwap, ad.kind)
+ optins = ad.optins
+ require.Len(t, optins, 0)
+}
+
+func TestAppBoxesOptin(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ assembled := assembleApps(t)
+
+ round, intra := uint64(1337), uint64(0)
+
+ hint := appData{sender: 7}
+
+ // app call transaction opting into boxes gets replaced by creating appBoxes
+ g.startRound()
+ actual, sgnTxns, appID, err := g.generateAppCallInternal(appBoxesOptin, round, intra, &hint)
+ _ = appID
+ require.NoError(t, err)
+ require.Equal(t, appBoxesCreate, actual)
+
+ require.Len(t, sgnTxns, 2)
+ createTxn := sgnTxns[0].Txn
+
+ require.Equal(t, protocol.ApplicationCallTx, createTxn.Type)
+ require.Equal(t, indexToAccount(hint.sender), createTxn.Sender)
+ require.Equal(t, basics.AppIndex(0), createTxn.ApplicationCallTxnFields.ApplicationID)
+ require.Equal(t, assembled.boxesApproval, createTxn.ApplicationCallTxnFields.ApprovalProgram)
+ require.Equal(t, assembled.boxesClear, createTxn.ApplicationCallTxnFields.ClearStateProgram)
+ require.Equal(t, uint64(32), createTxn.ApplicationCallTxnFields.GlobalStateSchema.NumByteSlice)
+ require.Equal(t, uint64(32), createTxn.ApplicationCallTxnFields.GlobalStateSchema.NumUint)
+ require.Equal(t, uint64(8), createTxn.ApplicationCallTxnFields.LocalStateSchema.NumByteSlice)
+ require.Equal(t, uint64(8), createTxn.ApplicationCallTxnFields.LocalStateSchema.NumUint)
+ require.Equal(t, transactions.NoOpOC, createTxn.ApplicationCallTxnFields.OnCompletion)
+ require.Nil(t, createTxn.ApplicationCallTxnFields.Boxes)
+
+ require.Len(t, g.pendingAppSlice[appKindBoxes], 1)
+ require.Len(t, g.pendingAppSlice[appKindSwap], 0)
+ require.Len(t, g.pendingAppMap[appKindBoxes], 1)
+ require.Len(t, g.pendingAppMap[appKindSwap], 0)
+ ad := g.pendingAppSlice[appKindBoxes][0]
+ require.Equal(t, ad, g.pendingAppMap[appKindBoxes][ad.appID])
+ require.Equal(t, hint.sender, ad.sender)
+ require.Equal(t, appKindBoxes, ad.kind)
+ require.Len(t, ad.optins, 0)
+
+ require.Contains(t, effects, actual)
+
+ paySiblingTxn := sgnTxns[1].Txn
+ require.Equal(t, protocol.PaymentTx, paySiblingTxn.Type)
+
+ g.finishRound()
+ // 2nd attempt to optin (with new sender) doesn't get replaced
+ g.startRound()
+ intra += 1
+ hint.sender = 8
+
+ actual, sgnTxns, appID, err = g.generateAppCallInternal(appBoxesOptin, round, intra, &hint)
+ _ = appID
+ require.NoError(t, err)
+ require.Equal(t, appBoxesOptin, actual)
+
+ require.Len(t, sgnTxns, 2)
+ pay := sgnTxns[1].Txn
+ require.Equal(t, protocol.PaymentTx, pay.Type)
+ require.NotEqual(t, basics.Address{}.String(), pay.Sender.String())
+
+ createTxn = sgnTxns[0].Txn
+ require.Equal(t, protocol.ApplicationCallTx, createTxn.Type)
+ require.Equal(t, indexToAccount(hint.sender), createTxn.Sender)
+ require.Equal(t, basics.AppIndex(1001), createTxn.ApplicationCallTxnFields.ApplicationID)
+ require.Equal(t, []byte(nil), createTxn.ApplicationCallTxnFields.ApprovalProgram)
+ require.Equal(t, []byte(nil), createTxn.ApplicationCallTxnFields.ClearStateProgram)
+ require.Equal(t, basics.StateSchema{}, createTxn.ApplicationCallTxnFields.GlobalStateSchema)
+ require.Equal(t, basics.StateSchema{}, createTxn.ApplicationCallTxnFields.LocalStateSchema)
+ require.Equal(t, transactions.OptInOC, createTxn.ApplicationCallTxnFields.OnCompletion)
+ require.Len(t, createTxn.ApplicationCallTxnFields.Boxes, 1)
+ require.Equal(t, crypto.Digest(pay.Sender).ToSlice(), createTxn.ApplicationCallTxnFields.Boxes[0].Name)
+
+ require.Len(t, g.pendingAppSlice[appKindBoxes], 1)
+ require.Len(t, g.pendingAppSlice[appKindSwap], 0)
+ require.Len(t, g.pendingAppMap[appKindBoxes], 1)
+ require.Len(t, g.pendingAppMap[appKindSwap], 0)
+ ad = g.pendingAppSlice[appKindBoxes][0]
+ require.Equal(t, ad, g.pendingAppMap[appKindBoxes][ad.appID])
+ require.Equal(t, hint.sender, ad.sender) // NOT 8!!!
+ require.Equal(t, appKindBoxes, ad.kind)
+ optins := ad.optins
+ require.Len(t, optins, 1)
+ require.Contains(t, optins, hint.sender)
+
+ require.Contains(t, effects, actual)
+ require.Len(t, effects[actual], 2)
+ require.Equal(t, TxEffect{effectPaymentTxSibling, 1}, effects[actual][0])
+ require.Equal(t, TxEffect{effectInnerTx, 2}, effects[actual][1])
+
+ numTxns := 1 + countEffects(actual)
+ require.Equal(t, uint64(4), numTxns)
+
+ g.finishRound()
+ // 3rd attempt to optin gets replaced by vanilla app call
+ g.startRound()
+ intra += numTxns
+
+ actual, sgnTxns, appID, err = g.generateAppCallInternal(appBoxesOptin, round, intra, &hint)
+ _ = appID
+ require.NoError(t, err)
+ require.Equal(t, appBoxesCall, actual)
+
+ require.Len(t, sgnTxns, 1)
+
+ createTxn = sgnTxns[0].Txn
+ require.Equal(t, protocol.ApplicationCallTx, createTxn.Type)
+ require.Equal(t, indexToAccount(hint.sender), createTxn.Sender)
+ require.Equal(t, basics.AppIndex(1001), createTxn.ApplicationCallTxnFields.ApplicationID)
+ require.Equal(t, []byte(nil), createTxn.ApplicationCallTxnFields.ApprovalProgram)
+ require.Equal(t, []byte(nil), createTxn.ApplicationCallTxnFields.ClearStateProgram)
+ require.Equal(t, basics.StateSchema{}, createTxn.ApplicationCallTxnFields.GlobalStateSchema)
+ require.Equal(t, basics.StateSchema{}, createTxn.ApplicationCallTxnFields.LocalStateSchema)
+ require.Equal(t, transactions.NoOpOC, createTxn.ApplicationCallTxnFields.OnCompletion)
+ require.Len(t, createTxn.ApplicationCallTxnFields.Boxes, 1)
+ require.Equal(t, crypto.Digest(pay.Sender).ToSlice(), createTxn.ApplicationCallTxnFields.Boxes[0].Name)
+
+ // no change to app states
+ require.Len(t, g.pendingAppSlice[appKindBoxes], 0)
+ require.Len(t, g.pendingAppSlice[appKindSwap], 0)
+ require.Len(t, g.pendingAppMap[appKindBoxes], 0)
+ require.Len(t, g.pendingAppMap[appKindSwap], 0)
+
+ require.NotContains(t, effects, actual)
+}
+
func TestWriteRoundZero(t *testing.T) {
partitiontest.PartitionTest(t)
var testcases = []struct {
@@ -211,7 +460,7 @@ func TestWriteRoundZero(t *testing.T) {
}
for _, tc := range testcases {
tc := tc
- t.Run(fmt.Sprintf("%s", tc.name), func(t *testing.T) {
+ t.Run(tc.name, func(t *testing.T) {
t.Parallel()
g := makePrivateGenerator(t, tc.dbround, tc.genesis)
var data []byte
@@ -229,21 +478,75 @@ func TestWriteRoundZero(t *testing.T) {
func TestWriteRound(t *testing.T) {
partitiontest.PartitionTest(t)
g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
- var data []byte
- writer := bytes.NewBuffer(data)
- g.WriteBlock(writer, 0)
- g.WriteBlock(writer, 1)
- var block rpcs.EncodedBlockCert
- protocol.Decode(data, &block)
- require.Len(t, block.Block.Payset, int(g.config.TxnPerBlock))
+
+ prepBuffer := func() (*bytes.Buffer, rpcs.EncodedBlockCert) {
+ return bytes.NewBuffer([]byte{}), rpcs.EncodedBlockCert{}
+ }
+
+ // Initial conditions of g from makePrivateGenerator:
+ require.Equal(t, uint64(0), g.round)
+
+ // Round 0:
+ blockBuff, block0_1 := prepBuffer()
+ err := g.WriteBlock(blockBuff, 0)
+ require.NoError(t, err)
+
+ require.Equal(t, uint64(1), g.round)
+ protocol.Decode(blockBuff.Bytes(), &block0_1)
+ require.Equal(t, "blockgen-test", block0_1.Block.BlockHeader.GenesisID)
+ require.Equal(t, basics.Round(0), block0_1.Block.BlockHeader.Round)
+ require.NotNil(t, g.ledger)
+ require.Equal(t, basics.Round(0), g.ledger.Latest())
+
+ // WriteBlocks only advances the _internal_ round
+ // the first time called for a particular _given_ round
+ blockBuff, block0_2 := prepBuffer()
+ err = g.WriteBlock(blockBuff, 0)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), g.round)
+ protocol.Decode(blockBuff.Bytes(), &block0_2)
+ require.Equal(t, block0_1, block0_2)
+ require.NotNil(t, g.ledger)
+ require.Equal(t, basics.Round(0), g.ledger.Latest())
+
+ blockBuff, block0_3 := prepBuffer()
+ err = g.WriteBlock(blockBuff, 0)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), g.round)
+ protocol.Decode(blockBuff.Bytes(), &block0_3)
+ require.Equal(t, block0_1, block0_3)
+ require.NotNil(t, g.ledger)
+ require.Equal(t, basics.Round(0), g.ledger.Latest())
+
+ // Round 1:
+ blockBuff, block1_1 := prepBuffer()
+ err = g.WriteBlock(blockBuff, 1)
+ require.NoError(t, err)
+ require.Equal(t, uint64(2), g.round)
+ protocol.Decode(blockBuff.Bytes(), &block1_1)
+ require.Equal(t, "blockgen-test", block1_1.Block.BlockHeader.GenesisID)
+ require.Equal(t, basics.Round(1), block1_1.Block.BlockHeader.Round)
+ require.Len(t, block1_1.Block.Payset, int(g.config.TxnPerBlock))
+ require.NotNil(t, g.ledger)
+ require.Equal(t, basics.Round(1), g.ledger.Latest())
+ _, err = g.ledger.GetStateDeltaForRound(1)
+ require.NoError(t, err)
+
+ blockBuff, block1_2 := prepBuffer()
+ err = g.WriteBlock(blockBuff, 1)
+ require.NoError(t, err)
+ require.Equal(t, uint64(2), g.round)
+ protocol.Decode(blockBuff.Bytes(), &block1_2)
+ require.Equal(t, block1_1, block1_2)
require.NotNil(t, g.ledger)
require.Equal(t, basics.Round(1), g.ledger.Latest())
- _, err := g.ledger.GetStateDeltaForRound(1)
+ _, err = g.ledger.GetStateDeltaForRound(1)
require.NoError(t, err)
+
// request a block that is several rounds ahead of the current round
- err = g.WriteBlock(writer, 10)
+ err = g.WriteBlock(blockBuff, 10)
require.NotNil(t, err)
- require.Equal(t, err.Error(), "generator only supports sequential block access. Expected 2 but received request for 10")
+ require.Equal(t, err.Error(), "generator only supports sequential block access. Expected 1 or 2 but received request for 10")
}
func TestWriteRoundWithPreloadedDB(t *testing.T) {
@@ -260,7 +563,6 @@ func TestWriteRoundWithPreloadedDB(t *testing.T) {
dbround: 1,
round: 1,
genesis: bookkeeping.Genesis{Network: "generator-test1"},
- err: nil,
},
{
name: "invalid request",
@@ -274,28 +576,27 @@ func TestWriteRoundWithPreloadedDB(t *testing.T) {
dbround: 1,
round: 10,
genesis: bookkeeping.Genesis{Network: "generator-test3"},
- err: fmt.Errorf("generator only supports sequential block access. Expected 2 but received request for 10"),
+ err: fmt.Errorf("generator only supports sequential block access. Expected 1 or 2 but received request for 10"),
},
{
name: "preloaded database starting at 10",
dbround: 10,
round: 11,
genesis: bookkeeping.Genesis{Network: "generator-test4"},
- err: nil,
},
{
name: "preloaded database request round 20",
dbround: 10,
round: 20,
genesis: bookkeeping.Genesis{Network: "generator-test5"},
- err: nil,
},
}
for _, tc := range testcases {
tc := tc
- t.Run(fmt.Sprintf("%s", tc.name), func(t *testing.T) {
- t.Parallel()
+ t.Run(tc.name, func(t *testing.T) {
+ // No t.Parallel() here, to avoid contention in the ledger
g := makePrivateGenerator(t, tc.dbround, tc.genesis)
+
defer g.ledger.Close()
var data []byte
writer := bytes.NewBuffer(data)
@@ -305,7 +606,7 @@ func TestWriteRoundWithPreloadedDB(t *testing.T) {
if tc.round != tc.dbround && tc.err != nil {
err = g.WriteBlock(writer, tc.round)
require.NotNil(t, err)
- require.Equal(t, err.Error(), tc.err.Error())
+ require.Equal(t, tc.err.Error(), err.Error())
return
}
// write the rest of the blocks
@@ -353,7 +654,9 @@ func TestHandlers(t *testing.T) {
}
for _, testcase := range testcases {
+ testcase := testcase
t.Run(testcase.name, func(t *testing.T) {
+ t.Parallel()
req := httptest.NewRequest("GET", testcase.url, nil)
w := httptest.NewRecorder()
handler(w, req)
@@ -362,3 +665,132 @@ func TestHandlers(t *testing.T) {
})
}
}
+
// TestRecordData verifies that recordData creates a report entry on first use
// and increments GenerationCount on each subsequent call for the same TxTypeID.
func TestRecordData(t *testing.T) {
	partitiontest.PartitionTest(t)

	gen := makePrivateGenerator(t, 0, bookkeeping.Genesis{})

	id := TxTypeID("test")
	// No entry should exist before the first recordData call.
	data, ok := gen.reportData[id]
	require.False(t, ok)

	// First call creates the entry with a count of 1.
	gen.recordData(id, time.Now())
	data, ok = gen.reportData[id]
	require.True(t, ok)
	require.Equal(t, uint64(1), data.GenerationCount)

	// Second call increments the existing entry rather than resetting it.
	gen.recordData(id, time.Now())
	data, ok = gen.reportData[id]
	require.True(t, ok)
	require.Equal(t, uint64(2), data.GenerationCount)
}
+
// TestEffectsMap is a sanity check that asserts that the effects map
// has exactly the number of consequences that we expect.
func TestEffectsMap(t *testing.T) {
	partitiontest.PartitionTest(t)

	require.Len(t, effects, 2)
	// An unknown TxTypeID has no effects and a zero count.
	txId := TxTypeID("DNE")
	_, ok := effects[txId]
	require.False(t, ok)
	require.Equal(t, uint64(0), countEffects(txId))

	// appBoxesCreate produces a single sibling effect.
	txId = appBoxesCreate
	data, ok := effects[txId]
	require.True(t, ok)
	require.Len(t, data, 1)
	effect := data[0]
	require.Equal(t, uint64(1), effect.count)
	require.Contains(t, effect.effect, "sibling")
	require.Equal(t, uint64(1), countEffects(txId))

	// appBoxesOptin produces one sibling effect plus two inner-txn effects,
	// for a total effect count of 3.
	txId = appBoxesOptin
	data, ok = effects[txId]
	require.True(t, ok)
	require.Len(t, data, 2)
	effect = data[0]
	require.Equal(t, uint64(1), effect.count)
	require.Contains(t, effect.effect, "sibling")
	effect = data[1]
	require.Equal(t, uint64(2), effect.count)
	require.Contains(t, effect.effect, "inner")
	require.Equal(t, uint64(3), countEffects(txId))
}
+
// TestCumulativeEffects checks that CumulativeEffects folds per-TxTypeID
// generation counts together with the counts implied by the effects map:
// each root transaction contributes its own count plus count-weighted
// entries for every effect it triggers.
func TestCumulativeEffects(t *testing.T) {
	partitiontest.PartitionTest(t)

	report := Report{
		TxTypeID("app_boxes_optin"):   {GenerationCount: uint64(42)},
		TxTypeID("app_boxes_create"):  {GenerationCount: uint64(1337)},
		TxTypeID("pay_pay"):           {GenerationCount: uint64(999)},
		TxTypeID("asset_optin_total"): {GenerationCount: uint64(13)},
		TxTypeID("app_boxes_call"):    {GenerationCount: uint64(413)},
	}

	expectedEffectsReport := EffectsReport{
		"app_boxes_optin":        uint64(42),
		"app_boxes_create":       uint64(1337),
		"pay_pay":                uint64(999),
		"asset_optin_total":      uint64(13),
		"app_boxes_call":         uint64(413),
		// both optins and creates emit a payment sibling (42 + 1337):
		"effect_payment_sibling": uint64(42) + uint64(1337),
		// each optin emits 2 inner transactions:
		"effect_inner_tx":        uint64(2 * 42),
	}

	require.Equal(t, expectedEffectsReport, CumulativeEffects(report))
}
+
// TestCountInners exercises countInners over ApplyData with zero, one, and
// nested levels of inner transactions, checking that nested inners are
// counted recursively.
func TestCountInners(t *testing.T) {
	partitiontest.PartitionTest(t)

	tests := []struct {
		name string
		ad   transactions.ApplyData
		want int
	}{
		{
			name: "no inner transactions",
			ad:   transactions.ApplyData{},
			want: 0,
		},
		{
			name: "one level of inner transactions",
			ad: transactions.ApplyData{
				EvalDelta: transactions.EvalDelta{
					InnerTxns: []transactions.SignedTxnWithAD{{}, {}, {}},
				},
			},
			want: 3,
		},
		{
			// 2 top-level inners, the first of which contains 2 of its own: 4 total.
			name: "nested inner transactions",
			ad: transactions.ApplyData{
				EvalDelta: transactions.EvalDelta{
					InnerTxns: []transactions.SignedTxnWithAD{
						{
							ApplyData: transactions.ApplyData{
								EvalDelta: transactions.EvalDelta{
									InnerTxns: []transactions.SignedTxnWithAD{{}, {}},
								},
							},
						},
						{},
					},
				},
			},
			want: 4,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := countInners(tt.ad); got != tt.want {
				t.Errorf("countInners() = %v, want %v", got, tt.want)
			}
		})
	}
}
+
diff --git a/tools/block-generator/generator/generator_ledger.go b/tools/block-generator/generator/generator_ledger.go
new file mode 100644
index 000000000..c12906566
--- /dev/null
+++ b/tools/block-generator/generator/generator_ledger.go
@@ -0,0 +1,342 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+
+ "github.com/algorand/avm-abi/apps"
+ cconfig "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/committee"
+ txn "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/eval"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/rpcs"
+)
+
+// ---- ledger block generation ----
+
// setBlockHeader populates cert's block header from the generator's current
// round state (round, txn counter, timestamp, genesis info, rewards addresses,
// and consensus protocol). Branch, Seed, and the txn commitments are left as
// zero values — the generator does not compute real crypto commitments.
func (g *generator) setBlockHeader(cert *rpcs.EncodedBlockCert) {
	cert.Block.BlockHeader = bookkeeping.BlockHeader{
		Round:          basics.Round(g.round),
		TxnCounter:     g.txnCounter,
		Branch:         bookkeeping.BlockHash{},
		Seed:           committee.Seed{},
		TxnCommitments: bookkeeping.TxnCommitments{NativeSha512_256Commitment: crypto.Digest{}},
		TimeStamp:      g.timestamp,
		GenesisID:      g.genesisID,
		GenesisHash:    g.genesisHash,
		RewardsState: bookkeeping.RewardsState{
			FeeSink:                   g.feeSink,
			RewardsPool:               g.rewardsPool,
			RewardsLevel:              0,
			RewardsRate:               0,
			RewardsResidue:            0,
			RewardsRecalculationRound: 0,
		},
		UpgradeState: bookkeeping.UpgradeState{
			CurrentProtocol: g.protocol,
		},
		UpgradeVote:        bookkeeping.UpgradeVote{},
		StateProofTracking: nil,
	}
}
+
+
+// ---- ledger simulation and introspection ----
+
// initializeLedger creates a new ledger seeded from the generator's account
// balances and genesis configuration, and stores it on g.ledger.
// NOTE(review): on any failure it prints to stdout and calls os.Exit(1)
// rather than returning an error — acceptable for a CLI tool, but callers
// cannot recover.
func (g *generator) initializeLedger() {
	genBal := convertToGenesisBalances(g.balances)
	// add rewards pool with min balance
	genBal[g.rewardsPool] = basics.AccountData{
		MicroAlgos: basics.MicroAlgos{Raw: g.params.MinBalance},
	}
	bal := bookkeeping.MakeGenesisBalances(genBal, g.feeSink, g.rewardsPool)
	block, err := bookkeeping.MakeGenesisBlock(g.protocol, bal, g.genesisID, g.genesisHash)
	if err != nil {
		fmt.Printf("error making genesis: %v\n.", err)
		os.Exit(1)
	}
	// The ledger database prefix falls back to a fixed name when no genesis ID
	// was configured.
	var prefix string
	if g.genesisID == "" {
		prefix = "block-generator"
	} else {
		prefix = g.genesisID
	}
	l, err := ledger.OpenLedger(logging.Base(), prefix, true, ledgercore.InitState{
		Block:       block,
		Accounts:    bal.Balances,
		GenesisHash: g.genesisHash,
	}, cconfig.GetDefaultLocal())
	if err != nil {
		fmt.Printf("error initializing ledger: %v\n.", err)
		os.Exit(1)
	}
	g.ledger = l
}
+
// minTxnsForBlock returns the minimum number of transactions the generator
// must produce for the given round: zero for the genesis round, otherwise
// the configured per-block transaction count.
func (g *generator) minTxnsForBlock(round uint64) uint64 {
	// There are no transactions in the 0th round
	if round == 0 {
		return 0
	}
	return g.config.TxnPerBlock
}
+
// startRound updates the generator's txnCounter based on the latest block header's counter.
// It is assumed that g.round has already been incremented in finishRound().
// Returns an error if the latest block header cannot be read from the ledger.
func (g *generator) startRound() error {
	// The latest committed round is one behind g.round, clamped at 0 for the
	// very first round.
	var latestRound basics.Round
	if g.round > 0 {
		latestRound = basics.Round(g.round - 1)
	} else {
		latestRound = basics.Round(0)
	}

	latestHeader, err := g.ledger.BlockHdr(latestRound)
	if err != nil {
		return fmt.Errorf("could not obtain block header for round %d (latest round %d): %w", g.round, latestRound, err)
	}

	g.txnCounter = latestHeader.TxnCounter
	return nil
}
+
// finishRound tells the generator it can apply any pending state and updates its round.
// It advances the timestamp and round, promotes pending assets and pending apps
// into the permanent tracking structures, and resets per-round bookkeeping.
func (g *generator) finishRound() {
	g.timestamp += consensusTimeMilli
	g.round++

	// Apply pending assets...
	g.assets = append(g.assets, g.pendingAssets...)
	g.pendingAssets = nil

	// Reset the per-round payset and txn-type counters.
	g.latestPaysetWithExpectedID = nil
	g.latestData = make(map[TxTypeID]uint64)

	// Promote pending apps: new appIDs are added to both the slice (for random
	// selection) and the map (for lookup by ID); already-known appIDs just get
	// their optin sets unioned in.
	for kind, pendingAppSlice := range g.pendingAppSlice {
		for _, pendingApp := range pendingAppSlice {
			appID := pendingApp.appID
			if g.appMap[kind][appID] == nil {
				g.appSlice[kind] = append(g.appSlice[kind], pendingApp)
				g.appMap[kind][appID] = pendingApp
				for sender := range pendingApp.optins {
					g.accountAppOptins[kind][sender] = append(g.accountAppOptins[kind][sender], appID)
				}
			} else { // just union the optins when already exists
				for sender := range pendingApp.optins {
					g.appMap[kind][appID].optins[sender] = true
					g.accountAppOptins[kind][sender] = append(g.accountAppOptins[kind][sender], appID)
				}
			}
		}
	}
	g.resetPendingApps()
}
+
+// ---- ledger block evaluator ----
+
// startEvaluator opens a block evaluator on the generator's ledger for the
// given header. Generate is enabled and Validate disabled since the generator
// produces blocks rather than verifying external ones; paysetHint pre-sizes
// the evaluator's payset.
func (g *generator) startEvaluator(hdr bookkeeping.BlockHeader, paysetHint int) (*eval.BlockEvaluator, error) {
	return eval.StartEvaluator(g.ledger, hdr,
		eval.EvaluatorOptions{
			PaysetHint:          paysetHint,
			Generate:            true,
			Validate:            false,
			MaxTxnBytesPerBlock: 0,
			Tracer:              nil,
		})
}
+
// evaluateBlock feeds the provided transaction groups through a fresh block
// evaluator and generates a validated block. It returns the block, the
// evaluator's txn counter, and an error if evaluation of any group fails.
func (g *generator) evaluateBlock(hdr bookkeeping.BlockHeader, txGroups [][]txn.SignedTxnWithAD, paysetHint int) (*ledgercore.ValidatedBlock, uint64 /* txnCount */, error) {
	eval, err := g.startEvaluator(hdr, paysetHint)
	if err != nil {
		return nil, 0, fmt.Errorf("could not start evaluator: %w", err)
	}
	for i, txGroup := range txGroups {
		err := eval.TransactionGroup(txGroup)
		if err != nil {
			return nil, 0, fmt.Errorf("could not evaluate transaction group %d: %w", i, err)
		}
	}
	lvb, err := eval.GenerateBlock()
	return lvb, eval.TestingTxnCounter(), err
}
+
+func countInners(ad txn.ApplyData) int {
+ result := 0
+ for _, itxn := range ad.EvalDelta.InnerTxns {
+ result += 1 + countInners(itxn.ApplyData)
+ }
+ return result
+}
+
+// introspectLedgerVsGenerator is only called when the --verbose command line argument is specified.
+func (g *generator) introspectLedgerVsGenerator(roundNumber, intra uint64) (errs []error) {
+ if !g.verbose {
+ errs = append(errs, fmt.Errorf("introspectLedgerVsGenerator called when verbose=false"))
+ }
+
+ round := basics.Round(roundNumber)
+ block, err := g.ledger.Block(round)
+ if err != nil {
+ round = err.(ledgercore.ErrNoEntry).Committed
+ fmt.Printf("WARNING: inconsistent generator v. ledger state. Reset round=%d: %v\n", round, err)
+ errs = append(errs, err)
+ }
+
+ payset := block.Payset
+ nonEmptyApplyDataIndices := make([]uint64, 0)
+ emptyAd := txn.ApplyData{}
+ innerTxnCount := 0
+ for i, sgnTxn := range payset {
+ ad := sgnTxn.ApplyData
+ if ad.Equal(emptyAd) {
+ continue
+ }
+ nonEmptyApplyDataIndices = append(nonEmptyApplyDataIndices, uint64(i))
+ innerTxnCount += countInners(ad)
+ }
+
+ ledgerStateDeltas, err := g.ledger.GetStateDeltaForRound(round)
+ if err != nil {
+ errs = append(errs, err)
+ }
+
+ cumulative := CumulativeEffects(g.reportData)
+
+ sum := uint64(0)
+ for effect, cnt := range cumulative {
+ if TxTypeID(effect) == genesis {
+ continue
+ }
+ sum += cnt
+ }
+ fmt.Print("--------------------\n")
+ fmt.Printf("roundNumber (generator): %d\n", roundNumber)
+ fmt.Printf("round (ledger): %d\n", round)
+ fmt.Printf("g.txnCounter + intra: %d\n", g.txnCounter+intra)
+ fmt.Printf("block.BlockHeader.TxnCounter: %d\n", block.BlockHeader.TxnCounter)
+ fmt.Printf("len(g.latestPaysetWithExpectedID): %d\n", len(g.latestPaysetWithExpectedID))
+ fmt.Printf("len(block.Payset): %d\n", len(payset))
+ fmt.Printf("len(nonEmptyApplyDataIndices): %d\n", len(nonEmptyApplyDataIndices))
+ fmt.Printf("innerTxnCount: %d\n", innerTxnCount)
+ fmt.Printf("g.latestData: %+v\n", g.latestData)
+ fmt.Printf("cumuluative : %+v\n", cumulative)
+ fmt.Printf("all txn sum: %d\n", sum)
+ fmt.Print("--------------------\n")
+
+ // ---- FROM THE LEDGER: box and createable evidence ---- //
+
+ ledgerBoxEvidenceCount := 0
+ ledgerBoxEvidence := make(map[uint64][]uint64)
+ boxes := ledgerStateDeltas.KvMods
+ for k := range boxes {
+ appID, nameIEsender, _ := apps.SplitBoxKey(k)
+ ledgerBoxEvidence[appID] = append(ledgerBoxEvidence[appID], binary.LittleEndian.Uint64([]byte(nameIEsender))-1)
+ ledgerBoxEvidenceCount++
+ }
+
+ // TODO: can get richer info about app-Creatables from:
+ // updates.Accts.AppResources
+ ledgerCreatableAppsEvidence := make(map[uint64]uint64)
+ for creatableID, creatable := range ledgerStateDeltas.Creatables {
+ if creatable.Ctype == basics.AppCreatable {
+ ledgerCreatableAppsEvidence[uint64(creatableID)] = accountToIndex(creatable.Creator)
+ }
+ }
+ fmt.Printf("ledgerBoxEvidenceCount: %d\n", ledgerBoxEvidenceCount)
+ fmt.Printf("ledgerCreatableAppsEvidence: %d\n", len(ledgerCreatableAppsEvidence))
+
+ // ---- FROM THE GENERATOR: expected created and optins ---- //
+
+ expectedCreated := map[appKind]map[uint64]uint64{
+ appKindBoxes: make(map[uint64]uint64),
+ appKindSwap: make(map[uint64]uint64),
+ }
+ expectedOptins := map[appKind]map[uint64]map[uint64]bool{
+ appKindBoxes: make(map[uint64]map[uint64]bool),
+ appKindSwap: make(map[uint64]map[uint64]bool),
+ }
+
+ expectedOptinsCount := 0
+ for kind, appMap := range g.pendingAppMap {
+ for appID, ad := range appMap {
+ if len(ad.optins) > 0 {
+ expectedOptins[kind][appID] = ad.optins
+ expectedOptinsCount += len(ad.optins)
+ } else {
+ expectedCreated[kind][appID] = ad.sender
+ }
+ }
+ }
+ fmt.Printf("expectedCreatedCount: %d\n", len(expectedCreated[appKindBoxes]))
+ fmt.Printf("expectedOptinsCount: %d\n", expectedOptinsCount)
+
+ // ---- COMPARE LEDGER AND GENERATOR EVIDENCE ---- //
+
+ ledgerCreatablesUnexpected := map[uint64]uint64{}
+ for creatableID, creator := range ledgerCreatableAppsEvidence {
+ if expectedCreated[appKindSwap][creatableID] != creator && expectedCreated[appKindBoxes][creatableID] != creator {
+ ledgerCreatablesUnexpected[creatableID] = creator
+ }
+ }
+ generatorExpectedCreatablesNotFound := map[uint64]uint64{}
+ for creatableID, creator := range expectedCreated[appKindBoxes] {
+ if ledgerCreatableAppsEvidence[creatableID] != creator {
+ generatorExpectedCreatablesNotFound[creatableID] = creator
+ }
+ }
+
+ ledgerBoxOptinsUnexpected := map[uint64][]uint64{}
+ for appId, boxOptins := range ledgerBoxEvidence {
+ for _, optin := range boxOptins {
+ if _, ok := expectedOptins[appKindBoxes][appId][optin]; !ok {
+ ledgerBoxOptinsUnexpected[appId] = append(ledgerBoxOptinsUnexpected[appId], optin)
+ }
+ }
+ }
+
+ generatorExpectedOptinsNotFound := map[uint64][]uint64{}
+ for appId, appOptins := range expectedOptins[appKindBoxes] {
+ for optin := range appOptins {
+ missing := true
+ for _, boxOptin := range ledgerBoxEvidence[appId] {
+ if boxOptin == optin {
+ missing = false
+ break
+ }
+ }
+ if missing {
+ generatorExpectedOptinsNotFound[appId] = append(generatorExpectedOptinsNotFound[appId], optin)
+ }
+ }
+ }
+
+ fmt.Printf("ledgerCreatablesUnexpected: %+v\n", ledgerCreatablesUnexpected)
+ fmt.Printf("generatorExpectedCreatablesNotFound: %+v\n", generatorExpectedCreatablesNotFound)
+ fmt.Printf("ledgerBoxOptinsUnexpected: %+v\n", ledgerBoxOptinsUnexpected)
+ fmt.Printf("expectedOptinsNotFound: %+v\n", generatorExpectedOptinsNotFound)
+ return errs
+}
diff --git a/tools/block-generator/generator/generator_types.go b/tools/block-generator/generator/generator_types.go
new file mode 100644
index 000000000..c0ff24b4c
--- /dev/null
+++ b/tools/block-generator/generator/generator_types.go
@@ -0,0 +1,174 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "io"
+ "time"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ txn "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/protocol"
+)
+
// Generator is the interface needed to generate blocks.
// The Write* methods serialize the requested artifact (report, genesis,
// block, account, deltas, or status) to the provided writer; Stop shuts the
// generator down.
type Generator interface {
	WriteReport(output io.Writer) error
	WriteGenesis(output io.Writer) error
	WriteBlock(output io.Writer, round uint64) error
	WriteAccount(output io.Writer, accountString string) error
	WriteDeltas(output io.Writer, round uint64) error
	WriteStatus(output io.Writer) error
	Stop()
}
+
// generator is the concrete Generator implementation. It tracks simplified
// account/asset/app state alongside a real ledger so generated blocks evaluate
// correctly.
type generator struct {
	// verbose enables extra ledger-vs-generator introspection output.
	verbose bool

	config GenerationConfig

	// payment transaction metadata
	numPayments uint64

	// Number of algorand accounts
	numAccounts uint64

	// Block stuff
	round         uint64
	txnCounter    uint64
	prevBlockHash string
	timestamp     int64
	protocol      protocol.ConsensusVersion
	params        config.ConsensusParams
	genesis       bookkeeping.Genesis
	genesisID     string
	genesisHash   crypto.Digest

	// Rewards stuff
	feeSink                   basics.Address
	rewardsPool               basics.Address
	rewardsLevel              uint64
	rewardsResidue            uint64
	rewardsRate               uint64
	rewardsRecalculationRound uint64

	// balances for all accounts. To avoid crypto and reduce storage, accounts are faked.
	// The account is based on the index into the balances array.
	balances []uint64

	// assets is a minimal representation of the asset holdings, it doesn't
	// include the frozen state.
	assets []*assetData
	// pendingAssets is used to hold newly created assets so that they are not used before
	// being created.
	pendingAssets []*assetData

	// pendingAppMap provides a live mapping from appID to appData for each appKind
	// for the current round
	pendingAppMap map[appKind]map[uint64]*appData

	// pendingAppSlice provides a live slice of appData for each appKind. The reason
	// for maintaining both appMap and pendingAppSlice is to enable
	// randomly selecting an app to interact with and yet easily access it once
	// its identifier is known
	pendingAppSlice map[appKind][]*appData

	// appMap and appSlice store the information from their corresponding pending*
	// data structures at the end of each round and for the rest of the experiment
	appMap   map[appKind]map[uint64]*appData
	appSlice map[appKind][]*appData

	// accountAppOptins is used to keep track of which accounts have opted into
	// an app and enable random selection.
	accountAppOptins map[appKind]map[uint64][]uint64

	transactionWeights []float32

	payTxWeights   []float32
	assetTxWeights []float32
	appTxWeights   []float32

	// Reporting information from transaction type to data
	reportData Report
	// latestData keeps a count of how many transactions of each
	// txType occurred in the current round.
	latestData map[TxTypeID]uint64

	// ledger
	ledger *ledger.Ledger

	// latestBlockMsgp caches the latest written block
	latestBlockMsgp []byte

	// latestPaysetWithExpectedID provides the ordered payset transactions
	// together with the expected asset/app IDs (or 0 if not applicable)
	latestPaysetWithExpectedID []txnWithExpectedID

	// roundOffset is the starting round when resuming from a preloaded database.
	roundOffset uint64
}
// assetData is the generator's lightweight record of one created asset.
type assetData struct {
	assetID uint64
	// creator is the account index of the asset's creator.
	creator uint64
	name    string
	// Holding at index 0 is the creator.
	holdings []*assetHolding
	// Set of holders in the holdings array for easy reference.
	holders map[uint64]*assetHolding
}

// appData is the generator's lightweight record of one created application.
type appData struct {
	appID uint64
	// sender is the account index of the app's creator.
	sender uint64
	kind   appKind
	// optins is the set of account indices opted into this app.
	optins map[uint64]bool
}

// assetHolding records one account's balance of an asset.
type assetHolding struct {
	acctIndex uint64
	balance   uint64
}

// Report is the generation report.
type Report map[TxTypeID]TxData

// EffectsReport collates transaction counts caused by a root transaction.
type EffectsReport map[string]uint64

// TxData is the generator report data.
type TxData struct {
	GenerationTime  time.Duration `json:"generation_time_milli"`
	GenerationCount uint64        `json:"num_generated"`
}

// TxEffect summarizes a txn type count caused by a root transaction.
type TxEffect struct {
	effect string
	count  uint64
}

// txnWithExpectedID rolls up an expected asset/app ID for non-pay txns
// together with a signedTxn expected to be in the payset.
type txnWithExpectedID struct {
	expectedID uint64
	signedTxn  *txn.SignedTxn
	intra      uint64
	nextIntra  uint64
}
diff --git a/tools/block-generator/generator/make_transactions.go b/tools/block-generator/generator/make_transactions.go
index cd316779e..e733d4339 100644
--- a/tools/block-generator/generator/make_transactions.go
+++ b/tools/block-generator/generator/make_transactions.go
@@ -19,16 +19,36 @@ package generator
import (
"encoding/binary"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/transactions"
+ txn "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
"github.com/algorand/go-algorand/protocol"
)
-func (g *generator) makeTxnHeader(sender basics.Address, round, intra uint64) transactions.Header {
+// ---- header / boilerplate ----
+
// makeTxnHeader builds a standard transaction header for sender, valid from
// round to round+1000. The note embeds the global txn position
// (txnCounter+intra) so every generated transaction is unique.
func (g *generator) makeTxnHeader(sender basics.Address, round, intra uint64) txn.Header {
	note := make([]byte, 8)
	binary.LittleEndian.PutUint64(note, g.txnCounter+intra)

	return txn.Header{
		Sender:      sender,
		Fee:         basics.MicroAlgos{Raw: g.params.MinTxnFee},
		FirstValid:  basics.Round(round),
		LastValid:   basics.Round(round + 1000),
		GenesisID:   g.genesisID,
		GenesisHash: g.genesisHash,
		Note:        note,
	}
}
+
+// makeTestTxn creates and populates the flat txntest.Txn structure with the given values.
+func (g *generator) makeTestTxn(sender basics.Address, round, intra uint64) txntest.Txn {
note := make([]byte, 8)
- binary.LittleEndian.PutUint64(note, uint64(g.txnCounter+intra))
+ binary.LittleEndian.PutUint64(note, g.txnCounter+intra)
- return transactions.Header{
+ return txntest.Txn{
Sender: sender,
Fee: basics.MicroAlgos{Raw: g.params.MinTxnFee},
FirstValid: basics.Round(round),
@@ -39,11 +59,13 @@ func (g *generator) makeTxnHeader(sender basics.Address, round, intra uint64) tr
}
}
-func (g *generator) makePaymentTxn(header transactions.Header, receiver basics.Address, amount uint64, closeRemainderTo basics.Address) transactions.Transaction {
- return transactions.Transaction{
+// ---- payments ----
+
+func (g *generator) makePaymentTxn(header txn.Header, receiver basics.Address, amount uint64, closeRemainderTo basics.Address) txn.Transaction {
+ return txn.Transaction{
Type: protocol.PaymentTx,
Header: header,
- PaymentTxnFields: transactions.PaymentTxnFields{
+ PaymentTxnFields: txn.PaymentTxnFields{
Receiver: receiver,
Amount: basics.MicroAlgos{Raw: amount},
CloseRemainderTo: closeRemainderTo,
@@ -51,11 +73,13 @@ func (g *generator) makePaymentTxn(header transactions.Header, receiver basics.A
}
}
-func (g *generator) makeAssetCreateTxn(header transactions.Header, total uint64, defaultFrozen bool, assetName string) transactions.Transaction {
- return transactions.Transaction{
+// ---- asset transactions ----
+
+func (g *generator) makeAssetCreateTxn(header txn.Header, total uint64, defaultFrozen bool, assetName string) txn.Transaction {
+ return txn.Transaction{
Type: protocol.AssetConfigTx,
Header: header,
- AssetConfigTxnFields: transactions.AssetConfigTxnFields{
+ AssetConfigTxnFields: txn.AssetConfigTxnFields{
AssetParams: basics.AssetParams{
Total: total,
DefaultFrozen: defaultFrozen,
@@ -69,21 +93,21 @@ func (g *generator) makeAssetCreateTxn(header transactions.Header, total uint64,
}
}
-func (g *generator) makeAssetDestroyTxn(header transactions.Header, index uint64) transactions.Transaction {
- return transactions.Transaction{
+func (g *generator) makeAssetDestroyTxn(header txn.Header, index uint64) txn.Transaction {
+ return txn.Transaction{
Type: protocol.AssetConfigTx,
Header: header,
- AssetConfigTxnFields: transactions.AssetConfigTxnFields{
+ AssetConfigTxnFields: txn.AssetConfigTxnFields{
ConfigAsset: basics.AssetIndex(index),
},
}
}
-func (g *generator) makeAssetTransferTxn(header transactions.Header, receiver basics.Address, amount uint64, closeAssetsTo basics.Address, index uint64) transactions.Transaction {
- return transactions.Transaction{
+func (g *generator) makeAssetTransferTxn(header txn.Header, receiver basics.Address, amount uint64, closeAssetsTo basics.Address, index uint64) txn.Transaction {
+ return txn.Transaction{
Type: protocol.AssetTransferTx,
Header: header,
- AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ AssetTransferTxnFields: txn.AssetTransferTxnFields{
XferAsset: basics.AssetIndex(index),
AssetAmount: amount,
AssetReceiver: receiver,
@@ -92,6 +116,122 @@ func (g *generator) makeAssetTransferTxn(header transactions.Header, receiver ba
}
}
-func (g *generator) makeAssetAcceptanceTxn(header transactions.Header, index uint64) transactions.Transaction {
+func (g *generator) makeAssetAcceptanceTxn(header txn.Header, index uint64) txn.Transaction {
return g.makeAssetTransferTxn(header, header.Sender, 0, basics.Address{}, index)
}
+
+// ---- application transactions ----
+
+func (g *generator) makeAppCreateTxn(kind appKind, sender basics.Address, round, intra uint64, futureAppId uint64) []txn.SignedTxn {
+ var approval, clear string
+ if kind == appKindSwap {
+ approval, clear = approvalSwap, clearSwap
+ } else {
+ approval, clear = approvalBoxes, clearBoxes
+ }
+
+ createTxn := g.makeTestTxn(sender, round, intra)
+
+ createTxn.Type = protocol.ApplicationCallTx
+ createTxn.ApprovalProgram = approval
+ createTxn.ClearStateProgram = clear
+
+ // max out local/global state usage but split
+ // 50% between bytes/uint64
+ createTxn.LocalStateSchema = basics.StateSchema{
+ NumUint: 8,
+ NumByteSlice: 8,
+ }
+ createTxn.GlobalStateSchema = basics.StateSchema{
+ NumUint: 32,
+ NumByteSlice: 32,
+ }
+
+ createTxFee := g.params.MinTxnFee
+ senderIndex := accountToIndex(sender)
+
+ // TODO: should check for min balance
+ g.balances[senderIndex] -= createTxFee
+ if kind != appKindBoxes {
+ return txntest.Group(&createTxn)
+ }
+
+ // also group in a pay txn to fund the app
+ pstFee := uint64(1_000)
+ pstAmt := uint64(2_000_000)
+
+ paySibTxn := g.makeTestTxn(sender, round, intra)
+ paySibTxn.Type = protocol.PaymentTx
+ paySibTxn.Receiver = basics.AppIndex(futureAppId).Address()
+ paySibTxn.Fee = basics.MicroAlgos{Raw: pstFee}
+ paySibTxn.Amount = uint64(pstAmt)
+
+ // TODO: should check for min balance}
+ g.balances[senderIndex] -= (pstFee + pstAmt)
+
+ return txntest.Group(&createTxn, &paySibTxn)
+}
+
+// makeAppOptinTxn currently only works for the boxes app
+func (g *generator) makeAppOptinTxn(sender basics.Address, round, intra uint64, kind appKind, appIndex uint64) []txn.SignedTxn {
+ if kind != appKindBoxes {
+ panic("makeAppOptinTxn only works for the boxes app currently")
+ }
+
+ optInTxn := g.makeTestTxn(sender, round, intra)
+ /* all 0 values but keep around for reference
+ optInTxn.ApplicationArgs = nil
+ optInTxn.ForeignApps = nil
+ optInTxn.ForeignAssets = nil
+ optInTxn.ExtraProgramPages = 0
+ */
+
+ optInTxn.Type = protocol.ApplicationCallTx
+ optInTxn.ApplicationID = basics.AppIndex(appIndex)
+ optInTxn.OnCompletion = txn.OptInOC
+ // the first inner sends some algo to the creator:
+ optInTxn.Accounts = []basics.Address{indexToAccount(g.appMap[kind][appIndex].sender)}
+ optInTxn.Boxes = []txn.BoxRef{
+ {Name: crypto.Digest(sender).ToSlice()},
+ }
+
+ // TODO: these may not make sense for the swap optin
+
+ pstFee := uint64(2_000)
+ pstAmt := uint64(2_000_000)
+
+ paySibTxn := g.makeTestTxn(sender, round, intra)
+ paySibTxn.Type = protocol.PaymentTx
+ paySibTxn.Receiver = basics.AppIndex(appIndex).Address()
+ paySibTxn.Fee = basics.MicroAlgos{Raw: pstFee}
+ paySibTxn.Amount = uint64(pstAmt)
+
+ senderIndex := accountToIndex(sender)
+ // TODO: should check for min balance}
+ // TODO: for the case of boxes, should refund 0.76 algo
+ g.balances[senderIndex] -= (pstFee + pstAmt)
+
+ return txntest.Group(&optInTxn, &paySibTxn)
+}
+
// makeAppCallTxn currently only works for the boxes app.
// It builds a NoOp app call invoking the "get box" method for the sender's
// box, and as a side effect debits the sender's tracked balance by the min fee.
func (g *generator) makeAppCallTxn(sender basics.Address, round, intra, appIndex uint64) txn.Transaction {
	callTxn := g.makeTestTxn(sender, round, intra)
	callTxn.Type = protocol.ApplicationCallTx
	callTxn.ApplicationID = basics.AppIndex(appIndex)
	callTxn.OnCompletion = txn.NoOpOC // redundant for clarity
	callTxn.ApplicationArgs = [][]byte{
		{0xe1, 0xf9, 0x3f, 0x1d}, // the method selector for getting a box
	}

	// the sender's box name is its address digest
	callTxn.Boxes = []txn.BoxRef{
		{Name: crypto.Digest(sender).ToSlice()},
	}

	// TODO: should check for min balance
	appCallTxFee := g.params.MinTxnFee
	senderIndex := accountToIndex(sender)
	g.balances[senderIndex] -= appCallTxFee

	return callTxn.Txn()
}
diff --git a/tools/block-generator/generator/server.go b/tools/block-generator/generator/server.go
index 81a7546b1..2aac4a455 100644
--- a/tools/block-generator/generator/server.go
+++ b/tools/block-generator/generator/server.go
@@ -19,34 +19,20 @@ package generator
import (
"fmt"
"net/http"
- "os"
"strconv"
"strings"
"time"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/tools/block-generator/util"
- "gopkg.in/yaml.v3"
)
-func initializeConfigFile(configFile string) (config GenerationConfig, err error) {
- data, err := os.ReadFile(configFile)
- if err != nil {
- return
- }
- err = yaml.Unmarshal(data, &config)
- if err != nil {
- return
- }
- return
-}
-
// MakeServer configures http handlers. Returns the http server.
func MakeServer(configFile string, addr string) (*http.Server, Generator) {
noOp := func(next http.Handler) http.Handler {
return next
}
- return MakeServerWithMiddleware(0, "", configFile, addr, noOp)
+ return MakeServerWithMiddleware(0, "", configFile, false, addr, noOp)
}
// BlocksMiddleware is a middleware for the blocks endpoint.
@@ -55,15 +41,16 @@ type BlocksMiddleware func(next http.Handler) http.Handler
// MakeServerWithMiddleware allows injecting a middleware for the blocks handler.
// This is needed to simplify tests by stopping block production while validation
// is done on the data.
-func MakeServerWithMiddleware(dbround uint64, genesisFile string, configFile string, addr string, blocksMiddleware BlocksMiddleware) (*http.Server, Generator) {
- config, err := initializeConfigFile(configFile)
+func MakeServerWithMiddleware(dbround uint64, genesisFile string, configFile string, verbose bool, addr string, blocksMiddleware BlocksMiddleware) (*http.Server, Generator) {
+ cfg, err := initializeConfigFile(configFile)
util.MaybeFail(err, "problem loading config file. Use '--config' or create a config file.")
var bkGenesis bookkeeping.Genesis
if genesisFile != "" {
bkGenesis, err = bookkeeping.LoadGenesisFromFile(genesisFile)
+ // TODO: consider using bkGenesis to set cfg.NumGenesisAccounts and cfg.GenesisAccountInitialBalance
util.MaybeFail(err, "Failed to parse genesis file '%s'", genesisFile)
}
- gen, err := MakeGenerator(dbround, bkGenesis, config)
+ gen, err := MakeGenerator(dbround, bkGenesis, cfg, verbose)
util.MaybeFail(err, "Failed to make generator with config file '%s'", configFile)
mux := http.NewServeMux()
diff --git a/tools/block-generator/generator/server_test.go b/tools/block-generator/generator/server_test.go
index 7007db00f..a885181dd 100644
--- a/tools/block-generator/generator/server_test.go
+++ b/tools/block-generator/generator/server_test.go
@@ -18,7 +18,6 @@ package generator
import (
"fmt"
- "os"
"strings"
"testing"
@@ -27,24 +26,6 @@ import (
"github.com/stretchr/testify/require"
)
-func TestInitConfigFile(t *testing.T) {
- partitiontest.PartitionTest(t)
- config, err := initializeConfigFile("../test_config.yml")
- require.NoError(t, err)
- require.Equal(t, uint64(10), config.NumGenesisAccounts)
- require.Equal(t, float32(0.25), config.AssetCloseFraction)
- require.Equal(t, float32(0.0), config.AssetDestroyFraction)
-}
-
-func TestInitConfigFileNotExist(t *testing.T) {
- partitiontest.PartitionTest(t)
- _, err := initializeConfigFile("this_is_not_a_config_file")
-
- if _, ok := err.(*os.PathError); !ok {
- require.Fail(t, "This should generate a path error")
- }
-}
-
func TestParseURL(t *testing.T) {
partitiontest.PartitionTest(t)
const blockQueryPrefix = "http://v2/blocks/"
@@ -112,17 +93,19 @@ func TestParseURL(t *testing.T) {
},
}
- for _, testcase := range testcases {
- t.Run(testcase.name, func(t *testing.T) {
- round, err := parseURL(testcase.url)
- if len(testcase.err) == 0 {
+ for _, tc := range testcases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ round, err := parseURL(tc.url)
+ if len(tc.err) == 0 {
msg := fmt.Sprintf("Unexpected error parsing '%s', expected round '%s' received error: %v",
- testcase.url, testcase.expectedParam, err)
+ tc.url, tc.expectedParam, err)
require.NoError(t, err, msg)
- assert.Equal(t, testcase.expectedParam, round)
+ assert.Equal(t, tc.expectedParam, round)
} else {
- require.Error(t, err, fmt.Sprintf("Expected an error containing: %s", testcase.err))
- require.True(t, strings.Contains(err.Error(), testcase.err))
+ require.Error(t, err, fmt.Sprintf("Expected an error containing: %s", tc.err))
+ require.True(t, strings.Contains(err.Error(), tc.err))
}
})
}
diff --git a/tools/block-generator/generator/teal/poap_boxes.teal b/tools/block-generator/generator/teal/poap_boxes.teal
new file mode 100644
index 000000000..b007005f6
--- /dev/null
+++ b/tools/block-generator/generator/teal/poap_boxes.teal
@@ -0,0 +1,551 @@
+// SOURCE: https://algoscan.app/app/946441472
+#pragma version 8
+
+intcblock 0 2 1 6 2000 5 4
+bytecblock 0x151f7c75 0x706f61705f6f6e626f6172645f636f756e74 0x706f61705f636f756e74 0x706f61705f617574686f725f61646472657373
+txn RekeyTo
+global ZeroAddress
+==
+assert
+
+txn CloseRemainderTo
+global ZeroAddress
+==
+assert
+
+txn AssetCloseTo
+global ZeroAddress
+==
+assert
+
+txn TypeEnum
+intc_3 // 6
+==
+assert
+
+txn Fee
+global MinTxnFee
+>=
+txn Fee
+intc_0 // 0
+==
+||
+assert
+
+txn ApplicationID
+bz label1
+
+txn OnCompletion
+intc 5 // 5
+==
+bnz label2
+
+txn OnCompletion
+intc 6 // 4
+==
+bnz label3
+
+txn OnCompletion
+intc_1 // 2
+==
+bnz label4
+
+txn OnCompletion
+intc_2 // 1 == OptInOC
+==
+bnz label5
+
+txn NumAppArgs
+intc_0 // 0
+==
+bz label6 // if have some app args ... continue ...
+b label7 // if no app app args: error
+
+label6:
+
+ pushbytes 0x53743e2e // "St>."
+ txna ApplicationArgs 0
+ ==
+ bnz label8
+
+ pushbytes 0xddc3d103 // 0xddc3d103
+ txna ApplicationArgs 0
+ ==
+ bnz label9 // create an app with approval in arg1 and clear in arg2 and return the new appID
+
+ pushbytes 0x935e5c25 // 0x935e5c25
+ txna ApplicationArgs 0
+ ==
+ bnz label10 // lets you call an arbitrary app with appid provided in arg1, approval program arg2, and clear program arg3
+
+ pushbytes 0xedbb9ab8 // 0xedbb9ab8
+ txna ApplicationArgs 0
+ ==
+ bnz label11 // delete case
+
+ pushbytes 0xe1f93f1d // 0xe1f93f1d <--- what we want to call
+ txna ApplicationArgs 0
+ ==
+ bnz label12
+ b label7
+
+label17:
+
+ txn Sender
+ global CreatorAddress
+ ==
+ assert
+ retsub
+
+label1: // App Creation
+
+ bytec_0 // 0x151f7c75
+ global CurrentApplicationID
+ itob
+ concat
+ b globalset
+
+label15: // trying to optin with non-zero balance and when app already exists
+
+ global GroupSize
+ intc_1 // 2
+ ==
+ assert // must be in a group of 2
+
+ gtxn 1 Amount
+ int 2000000 // pay 2 algo
+ // intc 4 // 2000
+ ==
+
+ gtxn 1 Receiver
+ global CurrentApplicationAddress
+ ==
+ &&
+ assert
+
+ // we don't want to limit the generator to only allow the creator to be the sender
+ // so the following is commented out:
+ // gtxn 1 Sender
+ // global CreatorAddress
+ // ==
+ // assert // 2nd txn in the group must be a pymt to the app for .002 Algos from the creator
+ gtxn 1 Fee
+
+ intc 4 // 2000
+ ==
+ assert // fee must be 2000
+
+ itxn_begin
+ intc_2 // 1
+ itxn_field TypeEnum // payment txn
+ txn Sender
+ itxn_field Receiver
+ pushint 764000
+ itxn_field Amount
+ global MinTxnFee
+ itxn_field Fee
+ itxn_submit // send 0.764 Algos from the app account to the Sender!!!
+
+ itxn_begin
+ intc_2 // 1
+ itxn_field TypeEnum // payment txn
+ global CreatorAddress
+ itxn_field Receiver
+ pushint 4000
+ itxn_field Amount
+ global MinTxnFee
+ itxn_field Fee
+ itxn_submit // send .004 Algos to the creator
+ retsub
+
+label5:
+
+ txn Sender
+ store 3
+
+ load 3 // Sender
+ balance
+ store 4 // SenderBalance
+
+ load 4
+ bz label7
+ callsub label15 // This introduces 2 inner txns
+
+ bytec_1 // "poap_onboard_count"
+ dup
+ app_global_get
+ intc_2 // 1
+ +
+ app_global_put
+ callsub label16
+
+ bytec_0 // 0x151f7c75
+ pushbytes 0x616c676f706f61705f6f7074696e // "algopoap_optin"
+ concat
+ b label13
+
+label16:
+
+ load 3 // Sender
+ // 992 = 1024 - 32 bytes:
+ byte "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
+
+ box_put // boxes[Sender] = "z" * 992 - our box of size 1024 has MBR 412,100 µA
+ retsub
+
+label2:
+
+ callsub label17
+
+ bytec_0 // 0x151f7c75
+ global CurrentApplicationID
+ itob
+ concat
+ b label13
+
+label3:
+
+ callsub label17
+
+ bytec_0 // 0x151f7c75
+ global CurrentApplicationID
+ itob
+ concat
+ b label13
+
+label4:
+
+ txn Sender
+ store 3 // 3 -> Sender
+
+ load 3
+ box_del
+ pop
+ bytec_0 // 0x151f7c75
+ pushbytes 0x616c676f706f61705f636c6f73656f7574 // "algopoap_closeout"
+ concat
+ b label13
+
+label8:
+
+ callsub label17
+
+ bytec_1 // "poap_onboard_count"
+ intc_0 // 0
+ app_global_put
+
+ bytec_2 // "poap_count"
+ intc_0 // 0
+ app_global_put
+
+ bytec_0 // 0x151f7c75
+ txna ApplicationArgs 1
+ concat
+ b label13
+
+label9:
+
+ intc_0 // 0
+ global CurrentApplicationID
+ app_opted_in
+ assert
+
+ global GroupSize
+ intc_1 // 2
+ ==
+ assert
+
+ gtxn 0 RekeyTo
+ global ZeroAddress
+ ==
+ assert
+
+ gtxn 0 CloseRemainderTo
+ global ZeroAddress
+ ==
+ assert
+
+ gtxn 0 AssetCloseTo
+ global ZeroAddress
+ ==
+ assert
+
+ gtxn 0 TypeEnum
+ intc_2 // 1 // first txn in group is a payment
+ ==
+ assert
+
+ gtxn 0 Fee
+ global MinTxnFee
+ >=
+ assert
+
+ gtxn 0 Amount
+ intc 4 // 2000
+ >=
+ assert
+
+ txn GroupIndex
+ intc_2 // 1
+ -
+ dup
+ gtxns Receiver
+ global CurrentApplicationAddress
+ ==
+ assert
+
+ gtxns Amount
+ global MinTxnFee
+ intc_1 // 2
+ *
+ >=
+ assert
+
+ itxn_begin
+ intc_3 // 6
+ itxn_field TypeEnum // app call type
+ intc_0 // 0
+ itxn_field OnCompletion
+ txna ApplicationArgs 1
+ dup
+ len
+ intc_1 // 2
+ swap
+ substring3
+ itxn_field ApprovalProgram
+ txna ApplicationArgs 2
+ dup
+ len
+ intc_1 // 2
+ swap
+ substring3
+ itxn_field ClearStateProgram
+ pushint 12
+ itxn_field GlobalNumByteSlice
+ pushint 18
+ itxn_field GlobalNumUint
+ intc_1 // 2
+ itxn_field LocalNumUint
+ intc_2 // 1
+ itxn_field LocalNumByteSlice
+ txn Sender
+ itxn_field Note
+ itxn_submit
+
+ itxn CreatedApplicationID
+ store 2
+
+ bytec_2 // "poap_count"
+ dup
+ app_global_get
+ intc_2 // 1
+ +
+ app_global_put
+
+ bytec_0 // 0x151f7c75
+ load 2
+ itob
+ concat
+ b label13
+
+label10:
+
+ intc_0 // 0
+ global CurrentApplicationID
+ app_opted_in
+ assert
+
+ itxn_begin
+ intc_3 // 6
+ itxn_field TypeEnum // app call type
+ intc 6 // 4
+ itxn_field OnCompletion
+ txna ApplicationArgs 1
+ btoi
+ txnas Applications
+ dup
+ store 1
+
+ itxn_field ApplicationID
+ load 1
+ bytec_3 // "poap_author_address"
+ app_global_get_ex
+ assert
+
+ txn Sender
+ ==
+ assert
+
+ txna ApplicationArgs 2
+ dup
+ len
+ intc_1 // 2
+ swap
+ substring3
+ itxn_field ApprovalProgram
+ txna ApplicationArgs 3
+ dup
+ len
+ intc_1 // 2
+ swap
+ substring3
+ itxn_field ClearStateProgram
+ txn Sender
+ itxn_field Note
+ itxn_submit
+
+ bytec_0 // 0x151f7c75
+ load 1
+ itob
+ concat
+ b label13
+
+label11:
+
+ intc_0 // 0
+ global CurrentApplicationID
+ app_opted_in
+ assert
+
+ itxn_begin
+ txna Assets 0
+ itxn_field Assets
+ intc_3 // 6
+ itxn_field TypeEnum // 6 => app call
+ intc 5 // 5
+ itxn_field OnCompletion // 5 => delete application
+ txna ApplicationArgs 1
+ btoi
+ txnas Applications
+ dup
+ store 1
+
+ itxn_field ApplicationID
+ load 1
+ bytec_3 // "poap_author_address"
+ app_global_get_ex
+ assert
+
+ txn Sender
+ ==
+ assert
+ itxn_submit
+
+ bytec_0 // 0x151f7c75
+ b label13
+
+label12:
+
+ // intc_0 // 0
+ txn Sender
+
+ global CurrentApplicationID
+ app_opted_in
+ assert
+
+ bytec_0 // 0x151f7c75
+ txn Sender
+ box_get
+ assert
+
+ concat
+ b label13
+
+label7:
+
+ err
+
+label13:
+
+ log
+
+ intc_2 // 1
+ return
+
+globalset:
+
+ // 64 bytes
+ byte "Why did the chicken cross the road? The answer has been omitted."
+
+ txn Sender
+ byte "0000000000000000000000000000000000000000000000000000000000000000"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "1111111111111111111111111111111111111111111111111111111111111111"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "2222222222222222222222222222222222222222222222222222222222222222"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "3333333333333333333333333333333333333333333333333333333333333332"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "4444444444444444444444444444444444444444444444444444444444444444"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "5555555555555555555555555555555555555555555555555555555555555555"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "6666666666666666666666666666666666666666666666666666666666666666"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "7777777777777777777777777777777777777777777777777777777777777777"
+ uncover 2
+ app_global_put
+
+ int 1337
+ txn Sender
+ byte "8888888888888888888888888888888888888888888888888888888888888888"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "9999999999999999999999999999999999999999999999999999999999999999"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
+ dig 2
+ app_global_put
+
+ txn Sender
+ byte "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+ uncover 2
+ app_global_put
+ b label13
+
diff --git a/tools/block-generator/generator/teal/poap_clear.teal b/tools/block-generator/generator/teal/poap_clear.teal
new file mode 100644
index 000000000..cc11ea2e7
--- /dev/null
+++ b/tools/block-generator/generator/teal/poap_clear.teal
@@ -0,0 +1,5 @@
+#pragma version 8
+
+pushint 1
+return
+
diff --git a/tools/block-generator/generator/teal/swap_amm.teal b/tools/block-generator/generator/teal/swap_amm.teal
new file mode 100644
index 000000000..9e3bfc3d4
--- /dev/null
+++ b/tools/block-generator/generator/teal/swap_amm.teal
@@ -0,0 +1,527 @@
+// SOURCE: https://algoscan.app/app/1000469889
+#pragma version 7
+
+intcblock 0 1 4
+txn NumAppArgs
+intc_0 // 0
+==
+bnz label1
+
+txna ApplicationArgs 0
+pushbytes 0x67c4469e // 0x67c4469e
+==
+bnz label2
+
+txna ApplicationArgs 0
+pushbytes 0xaa6d419d // 0xaa6d419d
+==
+bnz label3
+
+txna ApplicationArgs 0
+pushbytes 0x92e297a1 // 0x92e297a1
+==
+bnz label4
+
+txna ApplicationArgs 0
+pushbytes 0x99b36a95 // 0x99b36a95
+==
+bnz label5
+err
+
+label5:
+
+ txn OnCompletion
+ intc_0 // 0
+ ==
+ txn ApplicationID
+ intc_0 // 0
+ !=
+ &&
+ assert
+
+ txna ApplicationArgs 1
+ intc_0 // 0
+ getbyte
+ store 5
+
+ txna ApplicationArgs 2
+ btoi
+ store 6
+
+ load 5
+ load 6
+ callsub label6
+
+ intc_1 // 1
+ return
+
+label4:
+
+ txn OnCompletion
+ intc_0 // 0
+ ==
+ txn ApplicationID
+ intc_0 // 0
+ !=
+ &&
+ assert
+
+ txna ApplicationArgs 1
+ intc_0 // 0
+ getbyte
+ callsub label7
+
+ intc_1 // 1
+ return
+
+label3:
+
+ txn OnCompletion
+ intc_0 // 0
+ ==
+ txn ApplicationID
+ intc_0 // 0
+ !=
+ &&
+ assert
+
+ txna ApplicationArgs 1
+ intc_0 // 0
+ getbyte
+ store 4
+
+ txn GroupIndex
+ intc_1 // 1
+ -
+ store 3
+
+ load 3
+ gtxns TypeEnum
+ intc_1 // 1
+ ==
+ assert
+
+ load 3
+ load 4
+ callsub label8
+
+ intc_1 // 1
+ return
+
+label2:
+
+ txn OnCompletion
+ intc_0 // 0
+ ==
+ txn ApplicationID
+ intc_0 // 0
+ !=
+ &&
+ assert
+
+ txna ApplicationArgs 1
+ intc_0 // 0
+ getbyte
+ store 0
+
+ txna ApplicationArgs 2
+ intc_0 // 0
+ getbyte
+ store 1
+
+ txna ApplicationArgs 3
+ store 2
+
+ load 0
+ load 1
+ load 2
+ callsub label9
+
+ intc_1 // 1
+ return
+
+label1:
+
+ txn OnCompletion
+ intc_0 // 0
+ ==
+ bnz label10
+
+ txn OnCompletion
+ pushint 5
+ ==
+ bnz label11
+ b localsfill
+
+label11:
+
+ txn Sender
+ global CreatorAddress
+ ==
+ assert
+
+ intc_1 // 1
+ return
+
+label10:
+
+ txn ApplicationID
+ intc_0 // 0
+ ==
+ assert
+
+ intc_1 // 1
+ return
+
+label22:
+
+ store 13
+
+ load 13
+ intc_0 // 0
+ ==
+ bnz label12
+
+ global CurrentApplicationAddress
+ load 13
+ asset_holding_get AssetBalance
+ store 15
+ store 14
+
+ load 15
+ bnz label13
+
+ itxn_begin
+ intc_2 // 4
+ itxn_field TypeEnum
+ load 13
+ itxn_field XferAsset
+ global CurrentApplicationAddress
+ itxn_field AssetReceiver
+ intc_0 // 0
+ itxn_field AssetAmount
+ global MinTxnFee
+ itxn_field Fee
+ itxn_submit
+ b label14
+
+label13:
+
+ retsub
+
+label12:
+
+ retsub
+
+label14:
+
+ retsub
+
+label17:
+
+ store 24
+ store 23
+ store 22
+
+ load 22
+ intc_0 // 0
+ ==
+ bnz label15
+
+ intc_2 // 4
+ itxn_field TypeEnum
+ load 22
+ itxn_field XferAsset
+ load 23
+ itxn_field AssetReceiver
+ load 24
+ itxn_field AssetAmount
+ intc_0 // 0
+ itxn_field Fee
+ b label16
+
+label15:
+
+ intc_1 // 1
+ itxn_field TypeEnum
+ load 23
+ itxn_field Receiver
+ load 24
+ itxn_field Amount
+ intc_0 // 0
+ itxn_field Fee
+label16:
+
+ retsub
+
+label24:
+
+ store 19
+ store 18
+ store 17
+ store 16
+
+ load 16
+ app_params_get AppAddress
+ store 21
+ store 20
+
+ itxn_begin
+ load 18
+ load 20
+ load 17
+ callsub label17
+ itxn_next
+
+ pushbytes 0x53574150 // "SWAP"
+ itxn_field ApplicationArgs
+ intc_0 // 0
+ itob
+ itxn_field ApplicationArgs
+ intc_0 // 0
+ itxn_field Fee
+ load 16
+ itxn_field ApplicationID
+ load 18
+ load 19
+ <
+ bnz label18
+
+ load 19
+label21:
+
+ itxn_field Assets
+ load 18
+ load 19
+ >
+ bnz label19
+
+ load 19
+ b label20
+
+label19:
+
+ load 18
+ b label20
+
+label18:
+
+ load 18
+ b label21
+
+label20:
+
+ itxn_field Assets
+ pushint 6
+ itxn_field TypeEnum
+ itxn_submit
+ retsub
+
+label9:
+
+ store 9
+ store 8
+ store 7
+
+ load 8
+ txnas Assets
+ callsub label22
+
+ load 9
+ pushint 2
+ extract_uint64
+ store 11
+
+ load 9
+ pushint 10
+ extract_uint64
+ store 12
+
+ intc_0 // 0
+ store 10
+
+label25:
+
+ load 10
+ pushint 85
+ <
+ bz label23
+
+ load 11
+ load 12
+ load 7
+ txnas Assets
+ load 8
+ txnas Assets
+ callsub label24
+
+ load 10
+ intc_1 // 1
+ +
+ store 10
+ b label25
+
+label23:
+
+ retsub
+
+label8:
+
+ store 25
+
+ gtxns Amount
+ pushint 100000
+ >=
+ assert
+
+ itxn_begin
+ load 25
+ txnas Assets
+ global CurrentApplicationAddress
+ intc_0 // 0
+ callsub label17
+ itxn_submit
+
+ intc_1 // 1
+ return
+
+label7:
+
+ store 26
+
+ global CurrentApplicationAddress
+ load 26
+ txnas Assets
+ asset_holding_get AssetBalance
+ store 28
+ store 27
+
+ txn Sender
+ global CreatorAddress
+ ==
+ assert
+
+ itxn_begin
+ load 26
+ txnas Assets
+ global CreatorAddress
+ load 27
+ callsub label17
+
+ global CreatorAddress
+ itxn_field AssetCloseTo
+ itxn_submit
+
+ intc_1 // 1
+ return
+
+label6:
+
+ store 30
+ store 29
+
+ txn Sender
+ global CreatorAddress
+ ==
+ assert
+
+ itxn_begin
+ load 29
+ txnas Assets
+ global CreatorAddress
+ load 30
+ callsub label17
+ itxn_submit
+
+ intc_1 // 1
+ return
+
+localsfill:
+
+ // 64 bytes
+ byte "Why did the chicken cross the road? The answer has been omitted."
+
+ txn Sender
+ byte "0000000000000000000000000000000000000000000000000000000000000000"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "1111111111111111111111111111111111111111111111111111111111111111"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "2222222222222222222222222222222222222222222222222222222222222222"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "3333333333333333333333333333333333333333333333333333333333333332"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "4444444444444444444444444444444444444444444444444444444444444444"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "5555555555555555555555555555555555555555555555555555555555555555"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "6666666666666666666666666666666666666666666666666666666666666666"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "7777777777777777777777777777777777777777777777777777777777777777"
+ uncover 2
+ app_local_put
+
+ int 1337
+ txn Sender
+ byte "8888888888888888888888888888888888888888888888888888888888888888"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "9999999999999999999999999999999999999999999999999999999999999999"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
+ dig 2
+ app_local_put
+
+ txn Sender
+ byte "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+ uncover 2
+ app_local_put
+
+ int 1
+ return
+
diff --git a/tools/block-generator/generator/teal/swap_clear.teal b/tools/block-generator/generator/teal/swap_clear.teal
new file mode 100644
index 000000000..dd8ebd452
--- /dev/null
+++ b/tools/block-generator/generator/teal/swap_clear.teal
@@ -0,0 +1,5 @@
+#pragma version 7
+
+pushint 0
+return
+
diff --git a/tools/block-generator/test_config.yml b/tools/block-generator/generator/test_scenario.yml
index 6d411e9ad..6d411e9ad 100644
--- a/tools/block-generator/test_config.yml
+++ b/tools/block-generator/generator/test_scenario.yml
diff --git a/tools/block-generator/go.mod b/tools/block-generator/go.mod
index 306627f71..12649d09b 100644
--- a/tools/block-generator/go.mod
+++ b/tools/block-generator/go.mod
@@ -2,11 +2,12 @@ module github.com/algorand/go-algorand/tools/block-generator
replace github.com/algorand/go-algorand => ../..
-go 1.17
+go 1.20
require (
+ github.com/algorand/avm-abi v0.2.0
github.com/algorand/go-algorand v0.0.0-00010101000000-000000000000
- github.com/algorand/go-codec/codec v1.1.9
+ github.com/algorand/go-codec/codec v1.1.10
github.com/algorand/go-deadlock v0.2.2
github.com/lib/pq v1.10.9
github.com/spf13/cobra v1.7.0
@@ -16,11 +17,11 @@ require (
require (
github.com/DataDog/zstd v1.5.2 // indirect
- github.com/algorand/avm-abi v0.2.0 // indirect
- github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414 // indirect
+ github.com/algorand/falcon v0.1.0 // indirect
github.com/algorand/go-sumhash v0.1.0 // indirect
- github.com/algorand/msgp v1.1.53 // indirect
+ github.com/algorand/msgp v1.1.55 // indirect
github.com/algorand/oapi-codegen v1.12.0-algorand.0 // indirect
+ github.com/algorand/sortition v1.0.0 // indirect
github.com/algorand/websocket v1.4.6 // indirect
github.com/aws/aws-sdk-go v1.33.0 // indirect
github.com/consensys/gnark-crypto v0.7.0 // indirect
@@ -44,6 +45,7 @@ require (
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/crypto v0.1.0 // indirect
+ golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
golang.org/x/net v0.9.0 // indirect
golang.org/x/sys v0.7.0 // indirect
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009 // indirect
diff --git a/tools/block-generator/go.sum b/tools/block-generator/go.sum
index bd2732c8f..86dd5643d 100644
--- a/tools/block-generator/go.sum
+++ b/tools/block-generator/go.sum
@@ -1,125 +1,29 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
-cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
-cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
-cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
-cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
-cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
-cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
-cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/algorand/avm-abi v0.2.0 h1:bkjsG+BOEcxUcnGSALLosmltE0JZdg+ZisXKx0UDX2k=
github.com/algorand/avm-abi v0.2.0/go.mod h1:+CgwM46dithy850bpTeHh9MC99zpn2Snirb3QTl2O/g=
-github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414 h1:nwYN+GQ7Z5OOfZwqBO1ma7DSlP7S1YrKWICOyjkwqrc=
-github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
-github.com/algorand/go-codec/codec v1.1.9 h1:el4HFSPZhP+YCgOZxeFGB/BqlNkaUIs55xcALulUTCM=
-github.com/algorand/go-codec/codec v1.1.9/go.mod h1:YkEx5nmr/zuCeaDYOIhlDg92Lxju8tj2d2NrYqP7g7k=
+github.com/algorand/falcon v0.1.0 h1:xl832kfZ7hHG6B4p90DQynjfKFGbIUgUOnsRiMZXfAo=
+github.com/algorand/falcon v0.1.0/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
+github.com/algorand/go-codec/codec v1.1.10 h1:zmWYU1cp64jQVTOG8Tw8wa+k0VfwgXIPbnDfiVa+5QA=
+github.com/algorand/go-codec/codec v1.1.10/go.mod h1:YkEx5nmr/zuCeaDYOIhlDg92Lxju8tj2d2NrYqP7g7k=
github.com/algorand/go-deadlock v0.2.2 h1:L7AKATSUCzoeVuOgpTipfCEjdUu5ECmlje8R7lP9DOY=
github.com/algorand/go-deadlock v0.2.2/go.mod h1:Hat1OXKqKNUcN/iv74FjGhF4hsOE2l7gOgQ9ZVIq6Fk=
github.com/algorand/go-sumhash v0.1.0 h1:b/QRhyLuF//vOcicBIxBXYW8bERNoeLxieht/dUYpVg=
github.com/algorand/go-sumhash v0.1.0/go.mod h1:OOe7jdDWUhLkuP1XytkK5gnLu9entAviN5DfDZh6XAc=
-github.com/algorand/graphtrace v0.1.0/go.mod h1:HscLQrzBdH1BH+5oehs3ICd8SYcXvnSL9BjfTu8WHCc=
-github.com/algorand/msgp v1.1.53 h1:D6HKLyvLE6ltfsf8Apsrc+kqYb/CcOZEAfh1DpkPrNg=
-github.com/algorand/msgp v1.1.53/go.mod h1:5K3d58/poT5fPmtiwuQft6GjgSrVEM46KoXdLrID8ZU=
+github.com/algorand/msgp v1.1.55 h1:kWc9Xc08xtxCTWUiq1cRW5XGF+DFcfSGihYf0IZ/ivs=
+github.com/algorand/msgp v1.1.55/go.mod h1:RqZQBzAFDWpwh5TlabzZkWy+6kwL9cvXfLbU0gD99EA=
github.com/algorand/oapi-codegen v1.12.0-algorand.0 h1:W9PvED+wAJc+9EeXPONnA+0zE9UhynEqoDs4OgAxKhk=
github.com/algorand/oapi-codegen v1.12.0-algorand.0/go.mod h1:tIWJ9K/qrLDVDt5A1p82UmxZIEGxv2X+uoujdhEAL48=
+github.com/algorand/sortition v1.0.0 h1:PJiZtdSTBm4nArQrZXBnhlljHXhuyAXRJBqVWowQu3E=
+github.com/algorand/sortition v1.0.0/go.mod h1:23CZwAbTWPv0bBsq+Php/2J6Y/iXDyzlfcZyepeY5Fo=
github.com/algorand/websocket v1.4.6 h1:I0kV4EYwatuUrKtNiwzYYgojgwh6pksDmlqntKG2Woc=
github.com/algorand/websocket v1.4.6/go.mod h1:HJmdGzFtnlUQ4nTzZP6WrT29oGYf1t6Ybi64vROcT+M=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.33.0 h1:Bq5Y6VTLbfnJp1IV8EL/qUU5qO1DYHda/zis/sqevkY=
github.com/aws/aws-sdk-go v1.33.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas=
-github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/consensys/bavard v0.1.10/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.7.0 h1:rwdy8+ssmLYRqKp+ryRRgQJl/rCq2uv+n83cOydm5UE=
github.com/consensys/gnark-crypto v0.7.0/go.mod h1:KPSuJzyxkJA8xZ/+CV47tyqkr9MmpZA3PXivK4VPrVg=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/daixiang0/gci v0.3.2/go.mod h1:jaASoJmv/ykO9dAAPy31iJnreV19248qKDdVWf3QgC4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -127,822 +31,99 @@ github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
github.com/dchest/siphash v1.2.1 h1:4cLinnzVJDKxTCl9B01807Yiy+W7ZzVHj/KIroQRvT4=
github.com/dchest/siphash v1.2.1/go.mod h1:q+IRvb2gOSrUnYoPqHiyHXS0FOBBOdl6tONBlVnOnt4=
-github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
-github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
-github.com/getkin/kin-openapi v0.107.0/go.mod h1:9Dhr+FasATJZjS4iOLvB0hkaxgYdulrNYm2e9epLWOo=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
-github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
-github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
-github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
-github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
-github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
-github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
-github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
-github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
-github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
-github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/invopop/yaml v0.1.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
-github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/labstack/echo/v4 v4.9.1/go.mod h1:Pop5HLc+xoc4qhTZ1ip6C0RtP7Z+4VzRLWZZFKqbbjo=
-github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
-github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
-github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
-github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
-github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
-github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
-github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc=
-github.com/lestrrat-go/jwx v1.2.25/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY=
-github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/olivere/elastic v6.2.14+incompatible h1:k+KadwNP/dkXE0/eu+T6otk1+5fe0tEpPyQJ4XVm5i8=
github.com/olivere/elastic v6.2.14+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
-github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
-github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
-github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
-google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
-google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
-google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
-google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
-google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
-google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
-google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009 h1:q/fZgS8MMadqFFGa8WL4Oyz+TmjiZfi8UrzWhTl8d5w=
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009/go.mod h1:O0bY1e/dSoxMYZYTHP0SWKxG5EWLEvKR9/cOjWPPMKU=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
+pgregory.net/rapid v0.6.2 h1:ErW5sL+UKtfBfUTsWHDCoeB+eZKLKMxrSd1VJY6W4bw=
diff --git a/tools/block-generator/run_tests.sh b/tools/block-generator/run_tests.sh
deleted file mode 100755
index fcfc7279e..000000000
--- a/tools/block-generator/run_tests.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env bash
-
-CONNECTION_STRING=""
-CONDUIT_BINARY=""
-REPORT_DIR=""
-DURATION="1h"
-LOG_LEVEL="error"
-SCENARIOS=""
-
-help() {
- echo "Usage:"
- echo " -v|--verbose enable verbose script output."
- echo " -c|--connection-string"
- echo " PostgreSQL connection string."
- echo " -i|--conduit path to conduit binary."
- echo " -s|--scenarios path to conduit test scenarios."
- echo " -r|--report-dir directory where the report should be written."
- echo " -d|--duration test duration."
- echo " -l|--level log level to pass to conduit."
- echo " -g|--generator block-generator binary to run the generator."
- exit
-}
-
-while :; do
- case "${1-}" in
- -h | --help) help ;;
- -v | --verbose) set -x ;;
- -c | --connection-string)
- CONNECTION_STRING="${2-}"
- shift
- ;;
- -g | --generator)
- GENERATOR_BINARY="${2-}"
- shift
- ;;
- -i | --conduit)
- CONDUIT_BINARY="${2-}"
- shift
- ;;
- -r | --report-dir)
- REPORT_DIR="${2-}"
- shift
- ;;
- -s | --scenarios)
- SCENARIOS="${2-}"
- shift
- ;;
- -d | --duration)
- DURATION="${2-}"
- shift
- ;;
- -l | --level)
- LOG_LEVEL="${2-}"
- shift
- ;;
- -?*) echo "Unknown option: $1" && exit 1;;
- *) break ;;
- esac
- shift
-done
-
-args=("$@")
-
-if [ -z "$CONNECTION_STRING" ]; then
- echo "Missing required connection string parameter (-c / --connection-string)."
- exit 1
-fi
-
-if [ -z "$CONDUIT_BINARY" ]; then
- echo "Missing required conduit binary parameter (-i / --conduit)."
- exit 1
-fi
-
-if [ -z "$SCENARIOS" ]; then
- echo "Missing required conduit test scenario parameter (-s / --scenarios)."
- exit 1
-fi
-
-if [ -z "$GENERATOR_BINARY" ]; then
- echo "path to block-generator binary is required"
- exit 1
-fi
-
-echo "Running with binary: $CONDUIT_BINARY"
-echo "Report directory: $REPORT_DIR"
-echo "Duration: $DURATION"
-echo "Log Level: $LOG_LEVEL"
-
-"$GENERATOR_BINARY" runner \
- -i "$CONDUIT_BINARY" \
- -s "$SCENARIOS" \
- -d "$DURATION" \
- -c "$CONNECTION_STRING" \
- --report-directory "$REPORT_DIR" \
- --log-level "$LOG_LEVEL" \
- --reset-report-dir
-
diff --git a/tools/block-generator/runner/run.go b/tools/block-generator/runner/run.go
index 2e1f970c5..929bdd35e 100644
--- a/tools/block-generator/runner/run.go
+++ b/tools/block-generator/runner/run.go
@@ -19,6 +19,8 @@ package runner
import (
"bytes"
"context"
+ "sort"
+
// embed conduit template config file
_ "embed"
"encoding/json"
@@ -50,7 +52,8 @@ type Args struct {
PostgresConnectionString string
CPUProfilePath string
RunDuration time.Duration
- LogLevel string
+ RunnerVerbose bool
+ ConduitLogLevel string
ReportDirectory string
ResetReportDir bool
RunValidation bool
@@ -88,6 +91,9 @@ func Run(args Args) error {
defer fmt.Println("Done running tests!")
return filepath.Walk(args.Path, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return fmt.Errorf("run.go Run(): failed to walk path: %w", err)
+ }
// Ignore the directory
if info.IsDir() {
return nil
@@ -141,7 +147,7 @@ func (r *Args) run() error {
// Start services
algodNet := fmt.Sprintf("localhost:%d", 11112)
metricsNet := fmt.Sprintf("localhost:%d", r.MetricsPort)
- generatorShutdownFunc, _ := startGenerator(r.Path, nextRound, r.GenesisFile, algodNet, blockMiddleware)
+ generatorShutdownFunc, _ := startGenerator(r.Path, nextRound, r.GenesisFile, r.RunnerVerbose, algodNet, blockMiddleware)
defer func() {
// Shutdown generator.
if err := generatorShutdownFunc(); err != nil {
@@ -161,7 +167,7 @@ func (r *Args) run() error {
}
defer f.Close()
- conduitConfig := config{r.LogLevel, logfile,
+ conduitConfig := config{r.ConduitLogLevel, logfile,
fmt.Sprintf(":%d", r.MetricsPort),
algodNet, r.PostgresConnectionString,
}
@@ -178,8 +184,8 @@ func (r *Args) run() error {
}
defer func() {
// Shutdown conduit
- if err := conduitShutdownFunc(); err != nil {
- fmt.Printf("failed to shutdown Conduit: %s\n", err)
+ if sdErr := conduitShutdownFunc(); sdErr != nil {
+ fmt.Printf("failed to shutdown Conduit: %s\n", sdErr)
}
}()
@@ -379,16 +385,30 @@ func (r *Args) runTest(report *os.File, metricsURL string, generatorURL string)
if err = json.NewDecoder(resp.Body).Decode(&generatorReport); err != nil {
return fmt.Errorf("problem decoding generator report: %w", err)
}
- for metric, entry := range generatorReport {
+
+ effects := generator.CumulativeEffects(generatorReport)
+ keys := make([]string, 0, len(effects))
+ for k := range effects {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ allTxns := uint64(0)
+ for _, metric := range keys {
// Skip this one
if metric == "genesis" {
continue
}
- str := fmt.Sprintf("transaction_%s_total:%d\n", metric, entry.GenerationCount)
+ txCount := effects[metric]
+ allTxns += txCount
+ str := fmt.Sprintf("transaction_%s_total:%d\n", metric, txCount)
if _, err = report.WriteString(str); err != nil {
return fmt.Errorf("unable to write transaction_count metric: %w", err)
}
}
+ str := fmt.Sprintf("transaction_%s_total:%d\n", "ALL", allTxns)
+ if _, err = report.WriteString(str); err != nil {
+ return fmt.Errorf("unable to write transaction_count metric: %w", err)
+ }
// Record a rate from one of the first data points.
if len(collector.Data) > 5 {
@@ -406,9 +426,9 @@ func (r *Args) runTest(report *os.File, metricsURL string, generatorURL string)
}
// startGenerator starts the generator server.
-func startGenerator(configFile string, dbround uint64, genesisFile string, addr string, blockMiddleware func(http.Handler) http.Handler) (func() error, generator.Generator) {
+func startGenerator(configFile string, dbround uint64, genesisFile string, verbose bool, addr string, blockMiddleware func(http.Handler) http.Handler) (func() error, generator.Generator) {
// Start generator.
- server, generator := generator.MakeServerWithMiddleware(dbround, genesisFile, configFile, addr, blockMiddleware)
+ server, generator := generator.MakeServerWithMiddleware(dbround, genesisFile, configFile, verbose, addr, blockMiddleware)
// Start the server
go func() {
@@ -456,7 +476,7 @@ func startConduit(dataDir string, conduitBinary string, round uint64) (func() er
}
}
if err := cmd.Wait(); err != nil {
- fmt.Printf("ignoring error while waiting for process to stop: %s\n", err)
+ fmt.Printf("exiting block generator runner: %s\n", err)
}
return nil
}, nil
diff --git a/tools/block-generator/runner/runner.go b/tools/block-generator/runner/runner.go
index 32598b924..1bb3cd9cf 100644
--- a/tools/block-generator/runner/runner.go
+++ b/tools/block-generator/runner/runner.go
@@ -36,6 +36,7 @@ func init() {
Short: "Run test suite and collect results.",
Long: "Run an automated test suite using the block-generator daemon and a provided conduit binary. Results are captured to a specified output directory.",
Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("starting block-generator runner with args: %+v\n", runnerArgs)
if err := Run(runnerArgs); err != nil {
fmt.Println(err)
}
@@ -48,7 +49,8 @@ func init() {
RunnerCmd.Flags().StringVarP(&runnerArgs.PostgresConnectionString, "postgres-connection-string", "c", "", "Postgres connection string.")
RunnerCmd.Flags().DurationVarP(&runnerArgs.RunDuration, "test-duration", "d", 5*time.Minute, "Duration to use for each scenario.")
RunnerCmd.Flags().StringVarP(&runnerArgs.ReportDirectory, "report-directory", "r", "", "Location to place test reports.")
- RunnerCmd.Flags().StringVarP(&runnerArgs.LogLevel, "log-level", "l", "error", "LogLevel to use when starting Conduit. [panic, fatal, error, warn, info, debug, trace]")
+ RunnerCmd.Flags().BoolVarP(&runnerArgs.RunnerVerbose, "verbose", "v", false, "If set the runner will print debugging information from the generator and ledger.")
+ RunnerCmd.Flags().StringVarP(&runnerArgs.ConduitLogLevel, "conduit-log-level", "l", "error", "LogLevel to use when starting Conduit. [panic, fatal, error, warn, info, debug, trace]")
RunnerCmd.Flags().StringVarP(&runnerArgs.CPUProfilePath, "cpuprofile", "", "", "Path where Conduit writes its CPU profile.")
RunnerCmd.Flags().BoolVarP(&runnerArgs.ResetReportDir, "reset-report-dir", "", false, "If set any existing report directory will be deleted before running tests.")
RunnerCmd.Flags().BoolVarP(&runnerArgs.RunValidation, "validate", "", false, "If set the validator will run after test-duration has elapsed to verify data is correct. An extra line in each report indicates validator success or failure.")
diff --git a/tools/block-generator/scenarios/benchmarks/organic.25000.yml b/tools/block-generator/scenarios/benchmarks/organic.25000.yml
new file mode 100644
index 000000000..5d431c6e3
--- /dev/null
+++ b/tools/block-generator/scenarios/benchmarks/organic.25000.yml
@@ -0,0 +1,29 @@
+name: "Organic (25000)"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 25000
+
+# transaction distribution
+tx_pay_fraction: 0.05
+tx_asset_fraction: 0.75
+tx_app_fraction: 0.20
+
+# payment config
+pay_acct_create_fraction: 0.10
+pay_xfer_fraction: 0.90
+
+# asset config
+asset_create_fraction: 0.001
+asset_optin_fraction: 0.1
+asset_close_fraction: 0.05
+asset_xfer_fraction: 0.849
+asset_delete_fraction: 0
+
+# app kind config
+app_boxes_fraction: 1.0
+app_swap_fraction: 0.0
+
+# app boxes config
+app_boxes_create_fraction: 0.01
+app_boxes_optin_fraction: 0.1
+app_boxes_call_fraction: 0.89
diff --git a/tools/block-generator/scenarios/benchmarks/organic.50000.yml b/tools/block-generator/scenarios/benchmarks/organic.50000.yml
new file mode 100644
index 000000000..cf0f5437c
--- /dev/null
+++ b/tools/block-generator/scenarios/benchmarks/organic.50000.yml
@@ -0,0 +1,29 @@
+name: "Organic (50000)"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 50000
+
+# transaction distribution
+tx_pay_fraction: 0.05
+tx_asset_fraction: 0.75
+tx_app_fraction: 0.20
+
+# payment config
+pay_acct_create_fraction: 0.10
+pay_xfer_fraction: 0.90
+
+# asset config
+asset_create_fraction: 0.001
+asset_optin_fraction: 0.1
+asset_close_fraction: 0.05
+asset_xfer_fraction: 0.849
+asset_delete_fraction: 0
+
+# app kind config
+app_boxes_fraction: 1.0
+app_swap_fraction: 0.0
+
+# app boxes config
+app_boxes_create_fraction: 0.01
+app_boxes_optin_fraction: 0.1
+app_boxes_call_fraction: 0.89
diff --git a/tools/block-generator/scenarios/benchmarks/payment.25000.yml b/tools/block-generator/scenarios/benchmarks/payment.25000.yml
new file mode 100644
index 000000000..6c34ce42c
--- /dev/null
+++ b/tools/block-generator/scenarios/benchmarks/payment.25000.yml
@@ -0,0 +1,11 @@
+name: "Max TPS (25000)"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 25000
+
+# transaction distribution
+tx_pay_fraction: 1.0
+
+# payment config
+pay_acct_create_fraction: 0.02
+pay_xfer_fraction: 0.98
diff --git a/tools/block-generator/scenarios/benchmarks/payment.50000.yml b/tools/block-generator/scenarios/benchmarks/payment.50000.yml
new file mode 100644
index 000000000..b174f5d28
--- /dev/null
+++ b/tools/block-generator/scenarios/benchmarks/payment.50000.yml
@@ -0,0 +1,11 @@
+name: "Max TPS (50000)"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 50000
+
+# transaction distribution
+tx_pay_fraction: 1.0
+
+# payment config
+pay_acct_create_fraction: 0.02
+pay_xfer_fraction: 0.98
diff --git a/tools/block-generator/scenarios/benchmarks/stress.25000.yml b/tools/block-generator/scenarios/benchmarks/stress.25000.yml
new file mode 100644
index 000000000..0f83ae725
--- /dev/null
+++ b/tools/block-generator/scenarios/benchmarks/stress.25000.yml
@@ -0,0 +1,28 @@
+name: "Stress (25000)"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 25000
+
+# transaction distribution
+tx_pay_fraction: 0.05
+tx_app_fraction: 0.95
+
+# payment config
+pay_acct_create_fraction: 1.00
+pay_xfer_fraction: 0.00
+
+# asset config
+asset_create_fraction: 0.001
+asset_optin_fraction: 0.1
+asset_close_fraction: 0.05
+asset_xfer_fraction: 0.849
+asset_delete_fraction: 0
+
+# app kind config
+app_boxes_fraction: 1.0
+app_swap_fraction: 0.0
+
+# app boxes config
+app_boxes_create_fraction: 0.5
+app_boxes_optin_fraction: 0.5
+app_boxes_call_fraction: 0.0
diff --git a/tools/block-generator/scenarios/benchmarks/stress.50000.yml b/tools/block-generator/scenarios/benchmarks/stress.50000.yml
new file mode 100644
index 000000000..7679f0e62
--- /dev/null
+++ b/tools/block-generator/scenarios/benchmarks/stress.50000.yml
@@ -0,0 +1,28 @@
+name: "Stress (50000)"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 50000
+
+# transaction distribution
+tx_pay_fraction: 0.05
+tx_app_fraction: 0.95
+
+# payment config
+pay_acct_create_fraction: 1.00
+pay_xfer_fraction: 0.00
+
+# asset config
+asset_create_fraction: 0.001
+asset_optin_fraction: 0.1
+asset_close_fraction: 0.05
+asset_xfer_fraction: 0.849
+asset_delete_fraction: 0
+
+# app kind config
+app_boxes_fraction: 1.0
+app_swap_fraction: 0.0
+
+# app boxes config
+app_boxes_create_fraction: 0.5
+app_boxes_optin_fraction: 0.5
+app_boxes_call_fraction: 0.0
diff --git a/tools/block-generator/scenarios/config.allmixed.jumbo.yml b/tools/block-generator/scenarios/config.allmixed.jumbo.yml
new file mode 100644
index 000000000..bc6f1a56a
--- /dev/null
+++ b/tools/block-generator/scenarios/config.allmixed.jumbo.yml
@@ -0,0 +1,31 @@
+name: "All Mixed (jumbo)"
+tx_per_block: 25000
+
+# transaction distribution
+tx_pay_fraction: 0.25
+tx_asset_fraction: 0.25
+tx_app_fraction: 0.50
+
+# payment config
+pay_acct_create_fraction: 0.02
+pay_xfer_fraction: 0.98
+
+# asset config
+asset_create_fraction: 0.001
+asset_optin_fraction: 0.1
+asset_close_fraction: 0.05
+asset_xfer_fraction: 0.849
+asset_delete_fraction: 0
+
+# app kind config
+app_boxes_fraction: 0.5
+app_swap_fraction: 0.5
+
+# app boxes config
+app_boxes_create_fraction: 0.01
+app_boxes_optin_fraction: 0.1
+app_boxes_call_fraction: 0.89
+
+# app swap config
+app_swap_create_fraction: 1.0
+
diff --git a/tools/block-generator/scenarios/config.allmixed.small.yml b/tools/block-generator/scenarios/config.allmixed.small.yml
new file mode 100644
index 000000000..f4ef21657
--- /dev/null
+++ b/tools/block-generator/scenarios/config.allmixed.small.yml
@@ -0,0 +1,31 @@
+name: "All Mixed (small)"
+tx_per_block: 100
+
+# transaction distribution
+tx_pay_fraction: 0.25
+tx_asset_fraction: 0.25
+tx_app_fraction: 0.50
+
+# payment config
+pay_acct_create_fraction: 0.02
+pay_xfer_fraction: 0.98
+
+# asset config
+asset_create_fraction: 0.001
+asset_optin_fraction: 0.1
+asset_close_fraction: 0.05
+asset_xfer_fraction: 0.849
+asset_delete_fraction: 0
+
+# app kind config
+app_boxes_fraction: 0.5
+app_swap_fraction: 0.5
+
+# app boxes config
+app_boxes_create_fraction: 0.01
+app_boxes_optin_fraction: 0.1
+app_boxes_call_fraction: 0.89
+
+# app swap config
+app_swap_create_fraction: 1.0
+
diff --git a/tools/block-generator/scenarios/config.appboxes.small.yml b/tools/block-generator/scenarios/config.appboxes.small.yml
new file mode 100644
index 000000000..48f37257f
--- /dev/null
+++ b/tools/block-generator/scenarios/config.appboxes.small.yml
@@ -0,0 +1,19 @@
+name: "Boxes Optins Small"
+tx_per_block: 100
+
+tx_app_fraction: 1
+
+# payment config
+pay_acct_create_fraction: 0.02
+pay_xfer_fraction: 0.98
+
+# asset config
+tx_asset_create_fraction: 1.0
+
+# app kind config
+app_boxes_fraction: 1.0
+
+# app boxes config
+app_boxes_create_fraction: 0.01
+app_boxes_optin_fraction: 0.89
+app_boxes_call_fraction: 0.1
diff --git a/tools/block-generator/scenarios/config.appcreate.small.yml b/tools/block-generator/scenarios/config.appcreate.small.yml
new file mode 100644
index 000000000..3e02ae431
--- /dev/null
+++ b/tools/block-generator/scenarios/config.appcreate.small.yml
@@ -0,0 +1,19 @@
+name: "App Create"
+tx_per_block: 100
+
+# transaction distribution
+tx_asset_fraction: 0.05
+tx_app_fraction: 0.95
+
+# asset config
+tx_asset_create_fraction: 1.0
+
+# app kind config
+app_swap_fraction: 0.5
+app_boxes_fraction: 0.5
+
+# app swap config
+app_swap_create_fraction: 1.0
+
+# app boxes config
+app_boxes_create_fraction: 1.0
diff --git a/tools/block-generator/scripts/print_tps.py b/tools/block-generator/scripts/print_tps.py
new file mode 100644
index 000000000..3401c5515
--- /dev/null
+++ b/tools/block-generator/scripts/print_tps.py
@@ -0,0 +1,72 @@
+import os
+import argparse
+
+TXN_PER_BLOCK = 5000
+TXN_PER_BLOCK_SM = 100
+TXN_PER_BLOCK_JUMBO = 25000
+
+parser = argparse.ArgumentParser(description="Parse block TPS from reports")
+parser.add_argument(
+ "-d",
+ "--perf-report-dir",
+ required=False,
+ dest="dir",
+ help="report directory created by the block generator",
+)
+
+parser.add_argument(
+ "-c",
+ "--conduit-version",
+ required=False,
+ dest="conduit_version",
+ help="Release version or the commit hash of the Conduit binary used during the performance test",
+)
+
+parser.add_argument(
+    "-s", "--database-description",
+ required=False,
+ dest="database_description",
+ help="A short description of the database state used for the performance test",
+)
+
+args = parser.parse_args()
+
+
+def parse_report(report):
+ data = dict()
+ with open(report) as f:
+ for line in f:
+            tag, value = line.split(":", 1)
+ data[tag] = value if tag == "scenario" else float(value)
+ return data
+
+
+def pretty_print(data):
+ table_header = (
+ f"Scenario,Conduit_Version,{args.database_description}"
+ )
+ print(table_header)
+ for d in data:
+ scenario = d["scenario"].split("config.")[1]
+ scenario_parsed = scenario.split(".yml")[0]
+ txn_per_block = TXN_PER_BLOCK
+ if "sm" in scenario:
+ txn_per_block = TXN_PER_BLOCK_SM
+ elif "jumbo" in scenario:
+ txn_per_block = TXN_PER_BLOCK_JUMBO
+
+ printed_scenario = (
+ f"{scenario_parsed}({txn_per_block})"
+ )
+ print(
+ f"{printed_scenario},{args.conduit_version},{d['final_overall_transactions_per_second']:.2f}"
+ )
+
+
+if __name__ == "__main__":
+ data_list = []
+ for f in os.listdir(args.dir):
+ if f.endswith(".report"):
+            report_data = parse_report(os.path.join(args.dir, f))
+ data_list.append(report_data)
+ pretty_print(data_list)
diff --git a/tools/block-generator/run_postgres.sh b/tools/block-generator/scripts/run_postgres.sh
index 2c8175bb9..490404aff 100755
--- a/tools/block-generator/run_postgres.sh
+++ b/tools/block-generator/scripts/run_postgres.sh
@@ -14,8 +14,6 @@ set -e
POSTGRES_CONTAINER=generator-test-container
POSTGRES_PORT=15432
POSTGRES_DATABASE=generator_db
-CONFIG=${1:-"$(dirname $0)/test_config.yml"}
-echo "Using config file: $CONFIG"
function start_postgres() {
docker rm -f $POSTGRES_CONTAINER > /dev/null 2>&1 || true
diff --git a/tools/block-generator/scripts/run_runner.py b/tools/block-generator/scripts/run_runner.py
new file mode 100644
index 000000000..5f0753930
--- /dev/null
+++ b/tools/block-generator/scripts/run_runner.py
@@ -0,0 +1,201 @@
+import argparse
+import os
+from pathlib import Path
+import shlex
+import subprocess
+import sys
+import time
+
+
+POSTGRES_CONTAINER = "generator-test-container"
+POSTGRES_PORT = 15432
+POSTGRES_DATABASE = "generator_db"
+
+REPORT_DIRECTORY = "../../tmp/OUTPUT_RUN_RUNNER_TEST"
+
+CWD = Path.cwd()
+
+NL = "\n"
+BS = "\\"
+DBS = BS * 2
+Q = '"'
+SQ = ' "'
+
+
+def run_cmd(cmd):
+ print(f"Running command: {cmd}")
+ process = subprocess.Popen(
+ shlex.split(cmd.replace("\\\n", " ")),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ stdout, stderr = process.communicate()
+ if (rcode := process.returncode) != 0:
+ print(f"Error executing command: {cmd}")
+ print(stderr.decode())
+ sys.exit(rcode)
+ return stdout.decode()
+
+
+def up(args):
+ run_cmd(f"docker rm -f {args.pg_container}")
+ run_cmd(
+ f"docker run -d --name {args.pg_container} -e POSTGRES_USER=algorand -e POSTGRES_PASSWORD=algorand -p {args.pg_port}:5432 postgres"
+ )
+ time.sleep(5)
+
+ run_cmd(
+ f'docker exec -it {args.pg_container} psql -Ualgorand -c "create database {args.pg_database}"'
+ )
+
+
+def down(args):
+ run_cmd(f"docker rm -f {args.pg_container}")
+
+
+def launch_json_args(cmd: str):
+ def tighten(x):
+ return x.replace(" \\", "\\")
+
+ def wrap(x):
+ return tighten(x) if x.startswith('"') else f'"{tighten(x)}"'
+
+ newlines = []
+ lines = cmd.splitlines()
+ for i, line in enumerate(lines):
+ if i == 0:
+ continue
+ if not line.startswith("--"):
+ aline = wrap(line.replace(" ", ""))
+ else:
+ aline = ", ".join(map(wrap, line.split(" ", maxsplit=1)))
+
+ if i < len(lines) - 1:
+ aline += ","
+
+ newlines.append(aline)
+ return f"[{(NL.join(newlines)).replace(BS, '')}]"
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--conduit-binary", help="Path to conduit binary")
+ parser.add_argument(
+ "--scenario",
+ default=(default := CWD.parents[1] / "test_scenario.yml"),
+ help=f"Scenario configuration file ({default=!s})",
+ )
+ parser.add_argument(
+ "--reset-db",
+ action="store_true",
+ default=False,
+ help="Reset the DB and start at round 0 (default=False)",
+ )
+ parser.add_argument(
+ "--purge",
+ action="store_true",
+ default=False,
+ help="Shutdown container that has been kept alive (default=False)",
+ )
+ parser.add_argument(
+ "--keep-alive",
+ action="store_true",
+ default=False,
+ help="Keep postgres container alive at end of run (default=False)",
+ )
+ parser.add_argument(
+ "--pg-container",
+ default=(default := POSTGRES_CONTAINER),
+ help=f"Name of postgres container ({default=})",
+ )
+ parser.add_argument(
+ "--pg-port",
+ default=(default := POSTGRES_PORT),
+ help=f"Postgres port ({default=})",
+ )
+ parser.add_argument(
+ "--pg-database",
+ default=(default := POSTGRES_DATABASE),
+ help=f"Postgres database ({default=})",
+ )
+ parser.add_argument(
+ "--report-directory",
+ default=(default := REPORT_DIRECTORY),
+ help=f"Report directory ({default=})",
+ )
+ parser.add_argument(
+ "--build-generator",
+ action="store_true",
+ default=False,
+ help="Build the generator binary (default=False)",
+ )
+ parser.add_argument(
+ "--skip-runner",
+ action="store_true",
+ default=False,
+ help="Skip running the generator (default=False)",
+ )
+ parser.add_argument(
+ "--test-duration",
+ default=(default := "30s"),
+ help=f"Test duration ({default=})",
+ )
+
+ args = parser.parse_args()
+ print(args)
+ return args
+
+
+def main():
+ args = parse_args()
+
+ try:
+ if not args.purge:
+ print(f"Using scenario file: {args.scenario}")
+ print(f"!!! rm -rf {args.report_directory} !!!")
+ run_cmd(f"rm -rf {args.report_directory}")
+
+ if args.build_generator:
+ print("Building generator.")
+ os.chdir(CWD)
+ run_cmd("go build")
+ os.chdir("..")
+ else:
+ print("Skipping generator build.")
+
+ print("Starting postgres container.")
+ up(args)
+
+ SLNL = "\\\n"
+ generator_cmd = f"""{CWD}/block-generator \\
+runner \\
+--conduit-binary "{args.conduit_binary}" \\
+--report-directory {args.report_directory} \\
+--test-duration {args.test_duration} \\
+--conduit-log-level trace \\
+--postgres-connection-string "host=localhost user=algorand password=algorand dbname={args.pg_database} port={args.pg_port} sslmode=disable" \\
+--scenario {args.scenario} {DBS + NL + '--reset-db' if args.reset_db else ''}"""
+ if args.skip_runner:
+ print("Skipping test runner.")
+ print(f"Run it yourself:\n{generator_cmd}")
+ print(
+ f"""`launch.json` args:
+{launch_json_args(generator_cmd)}"""
+ )
+ else:
+ print("Starting test runner")
+ run_cmd(generator_cmd)
+ else:
+ print("Purging postgres container - NO OTHER ACTION TAKEN")
+ down(args)
+ finally:
+ if not args.keep_alive:
+ print("Stopping postgres container.")
+ down(args)
+ else:
+ print(f"Keeping postgres container alive: {args.pg_container}")
+ print(f"Also, not removing report directory: {args.report_directory}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/block-generator/run_runner.sh b/tools/block-generator/scripts/run_runner.sh
index 5e8396631..5643ab9c8 100755
--- a/tools/block-generator/run_runner.sh
+++ b/tools/block-generator/scripts/run_runner.sh
@@ -15,7 +15,7 @@ fi
POSTGRES_CONTAINER=generator-test-container
POSTGRES_PORT=15432
POSTGRES_DATABASE=generator_db
-SCENARIO=${2:-"$(dirname $0)/test_config.yml"}
+SCENARIO=${2:-"$(dirname $0)/../test_scenario.yml"}
echo "Using scenario config file: $SCENARIO"
function start_postgres() {
@@ -51,10 +51,10 @@ echo "Starting postgres container."
start_postgres
echo "Starting test runner"
$(dirname "$0")/block-generator runner \
- --conduit-binary "$CONDUIT_BINARY" \
- --report-directory $OUTPUT \
- --test-duration 30s \
- --log-level trace \
- --postgres-connection-string "host=localhost user=algorand password=algorand dbname=generator_db port=15432 sslmode=disable" \
- --scenario ${SCENARIO} \
+ --conduit-binary "$CONDUIT_BINARY" \
+ --report-directory $OUTPUT \
+ --test-duration 30s \
+ --conduit-log-level trace \
+ --postgres-connection-string "host=localhost user=algorand password=algorand dbname=generator_db port=15432 sslmode=disable" \
+ --scenario ${SCENARIO} \
--reset-db
diff --git a/tools/block-generator/upload_metrics.py b/tools/block-generator/upload_metrics.py
deleted file mode 100644
index f623a0c55..000000000
--- a/tools/block-generator/upload_metrics.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from datadog import initialize
-from datadog import api
-import os
-import argparse
-
-parser = argparse.ArgumentParser(description="Upload performance metrics to Datadog")
-parser.add_argument(
- "-f",
- "--perf-reports",
- required=True,
- action="store",
- dest="files",
- type=str,
- nargs="*",
- help="list of reports created by the block generator",
-)
-parser.add_argument(
- "-c",
- "--binary-version",
- required=True,
- help="Release version or the commit hash of the Conduit binary used during the performance test",
-)
-args = parser.parse_args()
-
-
-def parse_report(report):
- data = dict()
- with open(report) as f:
- for line in f:
- tag, value = line.split(":")
- data[tag] = value if tag == "scenario" else float(value)
- return data
-
-
-if __name__ == "__main__":
- print("initializing datadog")
- options = {
- "api_key": os.getenv("DATADOG_API_KEY"),
- "app_key": os.getenv("DATADOG_APP_KEY"),
- }
- initialize(**options)
- for fp in args.files:
- print(f"uploading metrics for {fp}")
- data = parse_report(fp)
- tags = [
- f"conduit_version:{args.binary_version}",
- f'duration:{data["test_duration_seconds"]}s',
- f'scenario:{data["scenario"]}',
- ]
- transactionsPerBlockAvgMetricName = "conduit.perf.transactions_per_second"
- tps = data["final_overall_transactions_per_second"]
- api.Metric.send(metric=transactionsPerBlockAvgMetricName, points=tps, tags=tags)
- print("uploaded metrics")
diff --git a/tools/block-generator/util/util.go b/tools/block-generator/util/util.go
index b36fca9ee..ccaafab84 100644
--- a/tools/block-generator/util/util.go
+++ b/tools/block-generator/util/util.go
@@ -23,6 +23,7 @@ import (
"fmt"
"os"
"strings"
+
// import postgres driver
_ "github.com/lib/pq"
)
@@ -70,7 +71,7 @@ func EmptyDB(postgresConnectionString string) error {
return fmt.Errorf("postgres connection string did not work: %w", err)
}
defer conn.Close()
- query := `DROP SCHEMA public CASCADE; CREATE SCHEMA public;`
+ query := `DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public;`
if _, err = conn.Exec(query); err != nil {
return fmt.Errorf("unable to reset postgres DB: %w", err)
}
diff --git a/tools/debug/carpenter/main.go b/tools/debug/carpenter/main.go
index 625211694..57e90a810 100644
--- a/tools/debug/carpenter/main.go
+++ b/tools/debug/carpenter/main.go
@@ -136,7 +136,6 @@ func (rps roundperiodstep) Format(f fmt.State, c rune) {
fmt.Fprintf(f, "%s%s.%s.%s", leadingspaces, colorize(r), colorize(p), colorize(s))
}
-//
func setupInputStream() io.ReadCloser {
var inputStream io.ReadCloser = os.Stdin
diff --git a/tools/debug/dumpblocks/main.go b/tools/debug/dumpblocks/main.go
index a932d9e60..663945d83 100644
--- a/tools/debug/dumpblocks/main.go
+++ b/tools/debug/dumpblocks/main.go
@@ -87,7 +87,7 @@ func main() {
}
}
if minRound == 0 {
- err := db.QueryRow("SELECT MIN(rnd) FROM blocks").Scan(&minRound)
+ err = db.QueryRow("SELECT MIN(rnd) FROM blocks").Scan(&minRound)
if err != nil {
panic(err)
}
diff --git a/tools/debug/transplanter/main.go b/tools/debug/transplanter/main.go
new file mode 100644
index 000000000..1c7a3a6b4
--- /dev/null
+++ b/tools/debug/transplanter/main.go
@@ -0,0 +1,477 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bufio"
+ "encoding/binary"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/golang/snappy"
+ _ "github.com/mattn/go-sqlite3"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/pools"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/node"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-codec/codec"
+)
+
+var dataDir = flag.String("d", "", "Data directory to track to get files from")
+var roundStart = flag.Int("r", 0, "Target round number to catch up to")
+var txnDir = flag.String("t", "", "Directory to read transaction files from")
+var txnFile = flag.String("tfile", "", "File to read transaction from")
+var txnLogDir = flag.String("txlog", "", "Directory to read txlog files from")
+var txnLogFile = flag.String("txlogfile", "", "File to read txlog from")
+var force = flag.Bool("y", false, "Suppress confirmation")
+var blockSize = flag.Int("b", 1000, "Number of transaction groups per block")
+
+var help = flag.Bool("help", false, "Show help")
+var helpShort = flag.Bool("h", false, "Show help")
+
+func usage() {
+ fmt.Fprintln(os.Stderr, "Utility to transplant transaction into real ledger")
+ flag.Usage()
+}
+
+func decodeTxGroup(data []byte) ([]transactions.SignedTxn, error) {
+ unverifiedTxGroup := make([]transactions.SignedTxn, 1)
+ dec := protocol.NewMsgpDecoderBytes(data)
+ ntx := 0
+
+ for {
+ if len(unverifiedTxGroup) == ntx {
+ n := make([]transactions.SignedTxn, len(unverifiedTxGroup)*2)
+ copy(n, unverifiedTxGroup)
+ unverifiedTxGroup = n
+ }
+ err := dec.Decode(&unverifiedTxGroup[ntx])
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("received a non-decodable txn: %v", err)
+ }
+ ntx++
+ if ntx >= config.MaxTxGroupSize {
+ // max ever possible group size reached, done reading input.
+ if dec.Remaining() > 0 {
+ // if something else left in the buffer - this is an error, drop
+				return nil, fmt.Errorf("received large txn group: %d txns decoded with %d bytes remaining", ntx, dec.Remaining())
+ }
+ }
+ }
+	return unverifiedTxGroup[:ntx], nil
+}
+
+func decodeTxGroupSlices(data []byte) ([]transactions.SignedTxn, error) {
+ var result [][]transactions.SignedTxn
+ err := protocol.DecodeReflect(data, &result)
+	if err != nil || len(result) == 0 {
+ return nil, fmt.Errorf("received a non-decodable txn slices: %v", err)
+ }
+ return result[0], nil
+}
+
+func getConfig() (config.Local, string, error) {
+ absolutePath, err := filepath.Abs(*dataDir)
+ if err != nil {
+ return config.Local{}, "", fmt.Errorf("can't convert data directory's path to absolute, %v", *dataDir)
+ }
+ cfg, err := config.LoadConfigFromDisk(absolutePath)
+ if err != nil {
+ return config.Local{}, "", fmt.Errorf("load config: %v", err)
+ }
+ return cfg, absolutePath, nil
+}
+
+func prepareGenesis() (ledgercore.InitState, bookkeeping.Genesis, string, error) {
+ genesisPath := filepath.Join(*dataDir, config.GenesisJSONFile)
+
+ // Load genesis
+ genesisText, err := os.ReadFile(genesisPath)
+ if err != nil {
+ return ledgercore.InitState{}, bookkeeping.Genesis{}, "", fmt.Errorf("read genesis file %s: %v", genesisPath, err)
+ }
+
+ var genesis bookkeeping.Genesis
+ err = protocol.DecodeJSON(genesisText, &genesis)
+ if err != nil {
+ return ledgercore.InitState{}, bookkeeping.Genesis{}, "", fmt.Errorf("parse genesis file %s: %v", genesisPath, err)
+ }
+
+ genesisDir := filepath.Join(*dataDir, genesis.ID())
+ ledgerPathnamePrefix := filepath.Join(genesisDir, config.LedgerFilenamePrefix)
+
+ genalloc, err := genesis.Balances()
+ if err != nil {
+ return ledgercore.InitState{}, bookkeeping.Genesis{}, "", fmt.Errorf("load genesis allocation: %v", err)
+ }
+ genBlock, err := bookkeeping.MakeGenesisBlock(genesis.Proto, genalloc, genesis.ID(), genesis.Hash())
+ if err != nil {
+ return ledgercore.InitState{}, bookkeeping.Genesis{}, "", fmt.Errorf("make genesis block: %v", err)
+ }
+ genesisInitState := ledgercore.InitState{
+ Block: genBlock,
+ Accounts: genalloc.Balances,
+ GenesisHash: genesis.Hash(),
+ }
+ return genesisInitState, genesis, ledgerPathnamePrefix, nil
+}
+
+type headerRow struct {
+ Ts time.Time
+ IP string
+ Port int
+}
+
+type headerDecoder interface {
+ decodeHeader(*snappy.Reader) (*headerRow, int, error)
+}
+
+type decoderV1 struct{}
+
+func (decoderV1) decodeHeader(r *snappy.Reader) (*headerRow, int, error) {
+ headerBytes := make([]byte, 12)
+ n, err := io.ReadFull(r, headerBytes)
+ if err != nil {
+ return nil, 0, err
+ } else if n != 12 {
+ return nil, 0, errors.New("incomplete v1 header")
+ }
+ ts := int64(binary.BigEndian.Uint64(headerBytes))
+ tsTime := time.Unix(0, ts)
+ lenMsg := binary.BigEndian.Uint32(headerBytes[8:])
+ return &headerRow{Ts: tsTime}, int(lenMsg), nil
+}
+
+type decoderV2 struct{}
+
+func (decoderV2) decodeHeader(r *snappy.Reader) (*headerRow, int, error) {
+ headerBytes := make([]byte, 18)
+ n, err := io.ReadFull(r, headerBytes)
+ if err != nil {
+ return nil, 0, err
+ } else if n != 18 {
+ return nil, 0, errors.New("incomplete v2 header")
+ }
+ ts := int64(binary.BigEndian.Uint64(headerBytes))
+ tsTime := time.Unix(0, ts)
+ ip := net.IP(headerBytes[8:12])
+ port := binary.BigEndian.Uint16(headerBytes[12:14])
+ lenMsg := binary.BigEndian.Uint32(headerBytes[14:])
+ return &headerRow{Ts: tsTime, IP: ip.String(), Port: int(port)}, int(lenMsg), nil
+}
+
+type txGroupItem struct {
+ err error
+ path string
+ ts time.Time
+ txgroup []transactions.SignedTxn
+}
+
+func transcribeSnappyLog(filePath string, output chan txGroupItem, wg *sync.WaitGroup) {
+ if wg != nil {
+ defer wg.Done()
+ }
+ file, err := os.OpenFile(filePath, os.O_RDONLY, 0644)
+ if err != nil {
+ output <- txGroupItem{err: err}
+ return
+ }
+ defer file.Close()
+
+ decoder := decoderV2{}
+ snappyReader := snappy.NewReader(file)
+ var n int
+
+ for {
+ headers, lenMsg, err := decoder.decodeHeader(snappyReader)
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ output <- txGroupItem{err: err}
+ return
+ }
+
+ msgBuff := make([]byte, lenMsg)
+ n, err = io.ReadFull(snappyReader, msgBuff)
+ if err == io.EOF {
+ output <- txGroupItem{err: fmt.Errorf("missing body in %s", filePath)}
+ return
+ }
+ if n != int(lenMsg) {
+ output <- txGroupItem{err: fmt.Errorf("incomplete message body in %s", filePath)}
+ return
+ }
+
+ dec := codec.NewDecoderBytes(msgBuff, new(codec.MsgpackHandle))
+ var txgroup []transactions.SignedTxn
+ for {
+ var stx transactions.SignedTxn
+ err := dec.Decode(&stx)
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ output <- txGroupItem{err: err}
+ return
+ }
+ txgroup = append(txgroup, stx)
+ }
+ output <- txGroupItem{ts: headers.Ts, txgroup: txgroup, path: filePath}
+ }
+}
+
+func readTransactions(output chan txGroupItem) {
+ defer close(output)
+ if len(*txnFile) > 0 {
+ data, err := os.ReadFile(*txnFile)
+ if err != nil {
+ err = fmt.Errorf("cannot read transaction file %s: %v", *txnFile, err)
+ output <- txGroupItem{err: err}
+ return
+ }
+
+ txgroup, err := decodeTxGroup(data)
+ if err != nil {
+ txgroup, err = decodeTxGroupSlices(data)
+ if err != nil {
+ err = fmt.Errorf("cannot decode transaction file %s: %v", *txnFile, err)
+ output <- txGroupItem{err: err}
+ return
+ }
+ }
+ output <- txGroupItem{ts: time.Time{}, path: *txnFile, txgroup: txgroup}
+ } else if len(*txnDir) > 0 {
+ files, err := os.ReadDir(*txnDir)
+ if err != nil {
+ err = fmt.Errorf("cannot read transaction directory %s: %v", *txnDir, err)
+ output <- txGroupItem{err: err}
+ return
+ }
+ for _, file := range files {
+ if file.IsDir() {
+ continue
+ }
+ path := filepath.Join(*txnDir, file.Name())
+ data, err := os.ReadFile(path)
+ if err != nil {
+ err = fmt.Errorf("cannot read transaction file %s: %v", path, err)
+ output <- txGroupItem{err: err}
+ return
+ }
+
+ txgroup, err := decodeTxGroup(data)
+ if err != nil {
+ err = fmt.Errorf("cannot decode transaction file %s: %v", path, err)
+ output <- txGroupItem{err: err}
+ return
+ }
+
+ output <- txGroupItem{ts: time.Time{}, path: path, txgroup: txgroup}
+ }
+ } else {
+ if len(*txnLogDir) > 0 {
+ files, err := os.ReadDir(*txnLogDir)
+ if err != nil {
+ err = fmt.Errorf("cannot read transaction log directory %s: %v", *txnLogDir, err)
+ output <- txGroupItem{err: err}
+ return
+ }
+ for _, file := range files {
+ if file.IsDir() {
+ continue
+ }
+ path := filepath.Join(*txnLogDir, file.Name())
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go transcribeSnappyLog(path, output, &wg)
+ wg.Wait()
+ }
+ } else {
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go transcribeSnappyLog(*txnLogFile, output, &wg)
+ wg.Wait()
+ }
+ }
+}
+
+func main() {
+ flag.Parse()
+
+ if *help || *helpShort || len(*dataDir) == 0 || *roundStart == 0 {
+ usage()
+ os.Exit(1)
+ }
+
+ if len(*txnDir) > 0 && len(*txnLogDir) > 0 {
+ fmt.Fprintln(os.Stderr, "Cannot specify both transactions and transaction logs dirs")
+ os.Exit(1)
+ }
+
+ if !*force {
+ fmt.Println("Running this command could damage your node installation, proceed anyway (N/y)?")
+ reader := bufio.NewReader(os.Stdin)
+		resp, err := reader.ReadString('\n')
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Cannot read confirmation: %v\n", err)
+			os.Exit(1)
+		}
+		resp = strings.TrimSpace(resp)
+ if strings.ToLower(resp) != "y" {
+ fmt.Fprintln(os.Stderr, "Exiting...")
+ os.Exit(1)
+ }
+ }
+
+ genesisInitState, genesis, ledgerPathnamePrefix, err := prepareGenesis()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Loading genesis error: %v", err)
+ os.Exit(1)
+ }
+
+ cfg, rootPath, err := getConfig()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Loading config error: %v", err)
+ os.Exit(1)
+ }
+
+ log := logging.Base()
+
+ l, err := ledger.OpenLedger(log, ledgerPathnamePrefix, false, genesisInitState, cfg)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot open ledger config: %v", err)
+ os.Exit(1)
+ }
+
+ var followerNode *node.AlgorandFollowerNode
+ latest := l.Latest()
+ if latest < basics.Round(*roundStart) {
+ l.Close()
+
+ fmt.Printf("Catching up from %d to %d\n", latest, *roundStart)
+
+ followerNode, err = node.MakeFollower(log, rootPath, cfg, []string{}, genesis)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot init follower node: %v", err)
+ os.Exit(1)
+ }
+ syncRound := uint64(*roundStart) - cfg.MaxAcctLookback + 1
+ err = followerNode.SetSyncRound(syncRound)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot configure catchup: %v", err)
+ os.Exit(1)
+ }
+
+ followerNode.Start()
+
+ for followerNode.Ledger().Latest() < basics.Round(*roundStart) {
+ fmt.Printf("At round %d, waiting for %d\n", followerNode.Ledger().Latest(), *roundStart)
+ time.Sleep(5 * time.Second)
+ }
+ followerNode.Stop()
+
+ fmt.Printf("Caught up to %d\n", *roundStart)
+ l, err = ledger.OpenLedger(log, ledgerPathnamePrefix, false, genesisInitState, cfg)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot open ledger config: %v", err)
+ os.Exit(1)
+ }
+ }
+ defer l.Close()
+ if len(*txnDir) == 0 && len(*txnLogDir) == 0 && len(*txnLogFile) == 0 && len(*txnFile) == 0 {
+ fmt.Printf("No transaction [log] directory specified, exiting at round %d\n", l.Latest())
+ return
+ }
+
+ input := make(chan txGroupItem)
+ go readTransactions(input)
+
+ nextRound := l.Latest() + 1
+ txCount := 0
+ totalTxCount := 0
+ blockCount := 0
+ pool := pools.MakeTransactionPool(l, cfg, log)
+ hdr, err := l.BlockHdr(l.Latest())
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot get latest block header: %v", err)
+ os.Exit(1)
+ }
+ for item := range input {
+ if item.err != nil {
+ fmt.Fprintf(os.Stderr, "ERR: reading transaction file %s failed: %v\n", item.path, item.err)
+ os.Exit(1)
+ }
+ if !item.ts.IsZero() {
+ txnTs := item.ts.Unix()
+ if txnTs < hdr.TimeStamp {
+ // fmt.Printf("INFO: skipping too early txn (%d < %d) from %s\n", txnTs, hdr.TimeStamp, item.path)
+ continue
+ }
+ if txnTs > hdr.TimeStamp+int64(10) {
+ fmt.Printf("INFO: too old txns, quitting... %s\n", item.path)
+ break
+ }
+ }
+ err = pool.Remember(item.txgroup)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "WARN: Cannot remember txn %s: %v\n", item.path, err)
+ continue
+ } else {
+ fmt.Fprintf(os.Stderr, "ADDED: from %s\n", item.path)
+ }
+ txCount++
+ totalTxCount++
+
+ if txCount >= *blockSize {
+ deadline := time.Now().Add(100 * time.Millisecond)
+ vb, err := pool.AssembleBlock(nextRound, deadline)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "ERR: Cannot assemble block %d: %v\n", nextRound, err)
+ break
+ }
+
+ err = l.AddValidatedBlock(*vb, agreement.Certificate{})
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "ERR: Cannot add block %d: %v\n", nextRound, err)
+ break
+ }
+ blockCount++
+ txCount = 0
+ hdr = vb.Block().BlockHeader
+ }
+ }
+
+ fmt.Printf("Added %d blocks (%d transactions) up to round %d\n", blockCount, totalTxCount, l.Latest())
+}
diff --git a/tools/network/bootstrap.go b/tools/network/bootstrap.go
index b3bd79b23..7558100a6 100644
--- a/tools/network/bootstrap.go
+++ b/tools/network/bootstrap.go
@@ -19,12 +19,12 @@ package network
import (
"context"
"fmt"
+ "net"
"github.com/algorand/go-algorand/logging"
)
-// ReadFromSRV is a helper to collect SRV addresses for a given name.
-func ReadFromSRV(service string, protocol string, name string, fallbackDNSResolverAddress string, secure bool) (addrs []string, err error) {
+func readFromSRV(service string, protocol string, name string, fallbackDNSResolverAddress string, secure bool) (records []*net.SRV, err error) {
log := logging.Base()
if name == "" {
log.Debug("no dns lookup due to empty name")
@@ -61,6 +61,38 @@ func ReadFromSRV(service string, protocol string, name string, fallbackDNSResolv
}
}
}
+ return records, err
+}
+
+// ReadFromSRV is a helper to collect SRV addresses for a given name
+func ReadFromSRV(service string, protocol string, name string, fallbackDNSResolverAddress string, secure bool) (addrs []string, err error) {
+ records, err := readFromSRV(service, protocol, name, fallbackDNSResolverAddress, secure)
+ if err != nil {
+ return addrs, err
+ }
+
+ for _, srv := range records {
+ // empty target won't take us far; skip these
+ if srv.Target == "" {
+ continue
+ }
+ // according to the SRV spec, each target need to end with a dot. While this would make a valid host name, including the
+ // last dot could lead to a non-canonical domain name representation, which is better avoided.
+ if srv.Target[len(srv.Target)-1:] == "." {
+ srv.Target = srv.Target[:len(srv.Target)-1]
+ }
+ addrs = append(addrs, fmt.Sprintf("%s:%d", srv.Target, srv.Port))
+ }
+ return
+}
+
+// ReadFromSRVPriority is a helper to collect SRV addresses with priorities for a given name
+func ReadFromSRVPriority(service string, protocol string, name string, fallbackDNSResolverAddress string, secure bool) (prioAddrs map[uint16][]string, err error) {
+ records, err := readFromSRV(service, protocol, name, fallbackDNSResolverAddress, secure)
+ if err != nil {
+ return prioAddrs, err
+ }
+ prioAddrs = make(map[uint16][]string, 4)
for _, srv := range records {
// empty target won't take us far; skip these
if srv.Target == "" {
@@ -71,7 +103,9 @@ func ReadFromSRV(service string, protocol string, name string, fallbackDNSResolv
if srv.Target[len(srv.Target)-1:] == "." {
srv.Target = srv.Target[:len(srv.Target)-1]
}
+ addrs := prioAddrs[srv.Priority]
addrs = append(addrs, fmt.Sprintf("%s:%d", srv.Target, srv.Port))
+ prioAddrs[srv.Priority] = addrs
}
return
}
diff --git a/tools/network/bootstrap_test.go b/tools/network/bootstrap_test.go
new file mode 100644
index 000000000..69ed39b2d
--- /dev/null
+++ b/tools/network/bootstrap_test.go
@@ -0,0 +1,66 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package network
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestReadFromSRVPriority(t *testing.T) {
+ t.Parallel()
+ partitiontest.PartitionTest(t)
+
+ service := "telemetry"
+ protocol := "tls"
+ name := "devnet.algodev.network"
+ fallback := ""
+ secure := true
+
+ prioAddrs, err := ReadFromSRVPriority("", protocol, name, fallback, secure)
+ require.Error(t, err)
+
+ prioAddrs, err = ReadFromSRVPriority(service, protocol, name, fallback, secure)
+ require.NoError(t, err)
+ addrs, ok := prioAddrs[1]
+ require.True(t, ok)
+ require.GreaterOrEqual(t, len(addrs), 1)
+ addr := addrs[0]
+ require.Greater(t, len(addr), 1)
+}
+
+func TestReadFromSRV(t *testing.T) {
+ t.Parallel()
+ partitiontest.PartitionTest(t)
+
+ service := "telemetry"
+ protocol := "tls"
+ name := "devnet.algodev.network"
+ fallback := ""
+ secure := true
+
+ addrs, err := ReadFromSRV("", protocol, name, fallback, secure)
+ require.Error(t, err)
+
+ addrs, err = ReadFromSRV(service, protocol, name, fallback, secure)
+ require.NoError(t, err)
+ require.GreaterOrEqual(t, len(addrs), 1)
+ addr := addrs[0]
+ require.Greater(t, len(addr), 1)
+}
diff --git a/tools/network/dnssec/config_windows.go b/tools/network/dnssec/config_windows.go
index 8d291be67..36c0aaf37 100644
--- a/tools/network/dnssec/config_windows.go
+++ b/tools/network/dnssec/config_windows.go
@@ -42,12 +42,12 @@ const (
const ip_size = 16
-// typedef struct _IP_ADDR_STRING {
-// struct _IP_ADDR_STRING *Next;
-// IP_ADDRESS_STRING IpAddress; // The String member is a char array of size 16. This array holds an IPv4 address in dotted decimal notation.
-// IP_MASK_STRING IpMask; // The String member is a char array of size 16. This array holds the IPv4 subnet mask in dotted decimal notation.
-// DWORD Context;
-// } IP_ADDR_STRING, *PIP_ADDR_STRING;
+// typedef struct _IP_ADDR_STRING {
+// struct _IP_ADDR_STRING *Next;
+// IP_ADDRESS_STRING IpAddress; // The String member is a char array of size 16. This array holds an IPv4 address in dotted decimal notation.
+// IP_MASK_STRING IpMask; // The String member is a char array of size 16. This array holds the IPv4 subnet mask in dotted decimal notation.
+// DWORD Context;
+// } IP_ADDR_STRING, *PIP_ADDR_STRING;
//
// https://docs.microsoft.com/en-us/windows/win32/api/iptypes/ns-iptypes-ip_addr_string
type ipAddrString struct {
@@ -57,17 +57,17 @@ type ipAddrString struct {
Context uint32
}
-// typedef struct {
-// char HostName[MAX_HOSTNAME_LEN + 4];
-// char DomainName[MAX_DOMAIN_NAME_LEN + 4];
-// PIP_ADDR_STRING CurrentDnsServer;
-// IP_ADDR_STRING DnsServerList;
-// UINT NodeType;
-// char ScopeId[MAX_SCOPE_ID_LEN + 4];
-// UINT EnableRouting;
-// UINT EnableProxy;
-// UINT EnableDns;
-// } FIXED_INFO_W2KSP1, *PFIXED_INFO_W2KSP1;
+// typedef struct {
+// char HostName[MAX_HOSTNAME_LEN + 4];
+// char DomainName[MAX_DOMAIN_NAME_LEN + 4];
+// PIP_ADDR_STRING CurrentDnsServer;
+// IP_ADDR_STRING DnsServerList;
+// UINT NodeType;
+// char ScopeId[MAX_SCOPE_ID_LEN + 4];
+// UINT EnableRouting;
+// UINT EnableProxy;
+// UINT EnableDns;
+// } FIXED_INFO_W2KSP1, *PFIXED_INFO_W2KSP1;
//
// https://docs.microsoft.com/en-us/windows/win32/api/iptypes/ns-iptypes-fixed_info_w2ksp1
type fixedInfo struct {
diff --git a/tools/network/dnssec/dialer.go b/tools/network/dnssec/dialer.go
index 08144ae5b..b80277afd 100644
--- a/tools/network/dnssec/dialer.go
+++ b/tools/network/dnssec/dialer.go
@@ -33,14 +33,15 @@ type Dialer struct {
// DialContext connects to the address on the named network using the provided context.
// It waits if needed not to exceed connectionsRateLimitingCount.
// Idea:
-// net.Dialer.DialContext calls net.Dialer.resolver().resolveAddrList
-// that calls net.Resolver.internetAddrList
-// that ends up in LookupIPAddr -> lookupIPAddr -> parseIPZone -> return
-// So this DialContext:
-// 1. Parses address to host and port
-// 2. If the host is not IPv4/IPv6 address then resolves it with DNSSEC
-// 3. Calls original net.DialContext knowing that the name already resolved
-// and the control flow would be as described above
+//
+// net.Dialer.DialContext calls net.Dialer.resolver().resolveAddrList
+// that calls net.Resolver.internetAddrList
+// that ends up in LookupIPAddr -> lookupIPAddr -> parseIPZone -> return
+// So this DialContext:
+// 1. Parses address to host and port
+// 2. If the host is not IPv4/IPv6 address then resolves it with DNSSEC
+// 3. Calls original net.DialContext knowing that the name already resolved
+// and the control flow would be as described above
func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
// snipped below is from net.Resolver.internetAddrList
diff --git a/tools/network/dnssec/resolver.go b/tools/network/dnssec/resolver.go
index 2b0b873c3..8900616f1 100644
--- a/tools/network/dnssec/resolver.go
+++ b/tools/network/dnssec/resolver.go
@@ -33,7 +33,6 @@ References
5. DNSSEC clarifications https://tools.ietf.org/html/rfc6840
6. DNSSEC keys management https://tools.ietf.org/html/rfc6781
7. DNS SRV https://tools.ietf.org/html/rfc2782
-
*/
package dnssec
diff --git a/tools/network/dnssec/sort_test.go b/tools/network/dnssec/sort_test.go
index ab1be21fb..f84ed118d 100644
--- a/tools/network/dnssec/sort_test.go
+++ b/tools/network/dnssec/sort_test.go
@@ -32,14 +32,26 @@ func TestSrvSort(t *testing.T) {
arr := make([]*net.SRV, 0, 7)
arr = append(arr, &net.SRV{Priority: 4, Weight: 1})
arr = append(arr, &net.SRV{Priority: 3, Weight: 1})
- arr = append(arr, &net.SRV{Priority: 1, Weight: 200})
+ arr = append(arr, &net.SRV{Priority: 1, Weight: 0xFFFF}) // max possible value to increase the ordering probability
arr = append(arr, &net.SRV{Priority: 1, Weight: 1})
arr = append(arr, &net.SRV{Priority: 1, Weight: 1})
arr = append(arr, &net.SRV{Priority: 1, Weight: 1})
arr = append(arr, &net.SRV{Priority: 1, Weight: 1})
+ retryCounter := 0
+retry:
srvRecArray(arr).sortAndRand()
- a.Equal(net.SRV{Priority: 1, Weight: 200}, *arr[0])
+ if (*arr[0] != net.SRV{Priority: 1, Weight: 0xFFFF}) {
+		// There is a small chance that a random number drawn from 0 to max uint16 is 0 or 1.
+		// In that case the first element of the resulting sequence has weight 1 rather than the highest possible.
+		// If this happens, retry, since it is expected to occur from time to time.
+ if retryCounter > 1 {
+ a.Fail("The first element of the resulting sequence should be with the highest possible weight at least in one of 3 attempts")
+ }
+ retryCounter++
+ goto retry
+ }
+ a.Equal(net.SRV{Priority: 1, Weight: 0xFFFF}, *arr[0])
a.Equal(net.SRV{Priority: 1, Weight: 1}, *arr[1])
a.Equal(net.SRV{Priority: 1, Weight: 1}, *arr[2])
a.Equal(net.SRV{Priority: 1, Weight: 1}, *arr[3])
diff --git a/tools/network/telemetryURIUpdateService.go b/tools/network/telemetryURIUpdateService.go
index becae7c74..c6a573557 100644
--- a/tools/network/telemetryURIUpdateService.go
+++ b/tools/network/telemetryURIUpdateService.go
@@ -81,32 +81,33 @@ func (t *telemetryURIUpdater) Start() {
}()
}
+// TODO: Support secondary telemetry SRV record lookup
func (t *telemetryURIUpdater) lookupTelemetryURL() (url *url.URL) {
bootstrapArray := t.cfg.DNSBootstrapArray(t.genesisNetwork)
- bootstrapArray = append(bootstrapArray, "default.algodev.network")
- for _, bootstrapID := range bootstrapArray {
- addrs, err := t.srvReader.readFromSRV("tls", bootstrapID)
+ bootstrapArray = append(bootstrapArray, &config.DNSBootstrap{PrimarySRVBootstrap: "default.algodev.network"})
+ for _, dnsBootstrap := range bootstrapArray {
+ addrs, err := t.srvReader.readFromSRV("tls", dnsBootstrap.PrimarySRVBootstrap)
if err != nil {
- t.log.Infof("An issue occurred reading telemetry entry for '_telemetry._tls.%s': %v", bootstrapID, err)
+ t.log.Infof("An issue occurred reading telemetry entry for '_telemetry._tls.%s': %v", dnsBootstrap.PrimarySRVBootstrap, err)
} else if len(addrs) == 0 {
- t.log.Infof("No telemetry entry for: '_telemetry._tls.%s'", bootstrapID)
+ t.log.Infof("No telemetry entry for: '_telemetry._tls.%s'", dnsBootstrap.PrimarySRVBootstrap)
} else {
for _, addr := range addrs {
// the addr that we received from ReadFromSRV contains host:port, we need to prefix that with the schema. since it's the tls, we want to use https.
url, err = url.Parse("https://" + addr)
if err != nil {
- t.log.Infof("a telemetry endpoint '%s' was retrieved for '_telemerty._tls.%s'. This does not seems to be a valid endpoint and will be ignored(%v).", addr, bootstrapID, err)
+ t.log.Infof("a telemetry endpoint '%s' was retrieved for '_telemerty._tls.%s'. This does not seems to be a valid endpoint and will be ignored(%v).", addr, dnsBootstrap.PrimarySRVBootstrap, err)
continue
}
return url
}
}
- addrs, err = t.srvReader.readFromSRV("tcp", bootstrapID)
+ addrs, err = t.srvReader.readFromSRV("tcp", dnsBootstrap.PrimarySRVBootstrap)
if err != nil {
- t.log.Infof("An issue occurred reading telemetry entry for '_telemetry._tcp.%s': %v", bootstrapID, err)
+ t.log.Infof("An issue occurred reading telemetry entry for '_telemetry._tcp.%s': %v", dnsBootstrap.PrimarySRVBootstrap, err)
} else if len(addrs) == 0 {
- t.log.Infof("No telemetry entry for: '_telemetry._tcp.%s'", bootstrapID)
+ t.log.Infof("No telemetry entry for: '_telemetry._tcp.%s'", dnsBootstrap.PrimarySRVBootstrap)
} else {
for _, addr := range addrs {
if strings.HasPrefix(addr, "https://") {
@@ -118,7 +119,7 @@ func (t *telemetryURIUpdater) lookupTelemetryURL() (url *url.URL) {
}
if err != nil {
- t.log.Infof("a telemetry endpoint '%s' was retrieved for '_telemerty._tcp.%s'. This does not seems to be a valid endpoint and will be ignored(%v).", addr, bootstrapID, err)
+ t.log.Infof("a telemetry endpoint '%s' was retrieved for '_telemerty._tcp.%s'. This does not seems to be a valid endpoint and will be ignored(%v).", addr, dnsBootstrap.PrimarySRVBootstrap, err)
continue
}
return url
diff --git a/tools/teal/tealcut/main.go b/tools/teal/tealcut/main.go
index 0740929db..494392564 100644
--- a/tools/teal/tealcut/main.go
+++ b/tools/teal/tealcut/main.go
@@ -17,6 +17,7 @@
package main
import (
+ "bytes"
"crypto/sha512"
"encoding/base64"
"encoding/binary"
@@ -24,7 +25,6 @@ import (
"fmt"
"os"
"strconv"
- "strings"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
@@ -59,16 +59,16 @@ func main() {
if err != nil {
panic(err)
}
- substrings := strings.SplitN(string(data), string(splitbytes[:]), 2)
+ before, after, found := bytes.Cut(data, splitbytes[:])
fmt.Println(splitbytes[:])
- if len(substrings) == 1 {
+ if !found {
fmt.Println("split-string not found")
return
}
- hash0 := sha512.Sum512_256([]byte(substrings[0]))
- hash1 := sha512.Sum512_256([]byte(substrings[1]))
+ hash0 := sha512.Sum512_256(before)
+ hash1 := sha512.Sum512_256(after)
encfn := func(str []byte) string {
return "0x" + hex.EncodeToString(str)
@@ -83,8 +83,10 @@ func main() {
}
var writebytes [8]byte
binary.BigEndian.PutUint64(writebytes[:], writenum)
- program := append([]byte(substrings[0]), writebytes[:]...)
- program = append(program, []byte(substrings[1])...)
+	// append to an empty slice to avoid modifying `data` (whose backing array `before` points into)
+ program := append([]byte{}, before...)
+ program = append(program, writebytes[:]...)
+ program = append(program, after...)
obj := logic.Program(program)
lhash := crypto.HashObj(&obj)
@@ -93,9 +95,9 @@ func main() {
}
}
- fmt.Println("hash0:", encfn([]byte(hash0[:])))
- fmt.Println("hash1:", encfn([]byte(hash1[:])))
- fmt.Println("sub0:", encfn([]byte(substrings[0])))
- fmt.Println("sub1:", encfn([]byte(substrings[1])))
- fmt.Println("data:", encfn([]byte(data)))
+ fmt.Println("hash0:", encfn(hash0[:]))
+ fmt.Println("hash1:", encfn(hash1[:]))
+ fmt.Println("sub0:", encfn(before))
+ fmt.Println("sub1:", encfn(after))
+ fmt.Println("data:", encfn(data))
}
diff --git a/tools/x-repo-types/go.mod b/tools/x-repo-types/go.mod
index 0fba7a8da..776b76a0e 100644
--- a/tools/x-repo-types/go.mod
+++ b/tools/x-repo-types/go.mod
@@ -1,6 +1,6 @@
module github.com/algorand/go-algorand/tools/x-repo-types
-go 1.17
+go 1.20
replace github.com/algorand/go-algorand => ../..
diff --git a/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go b/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go
index 3626e01ad..7aef6bde5 100644
--- a/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go
+++ b/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go
@@ -32,6 +32,9 @@ import (
var diffExclusions = map[string]bool{
// MicroAlgos is a struct with custom marshal override in go-algorand. In other repos it is a uint64.
`github.com/algorand/go-algorand/data/basics :: "basics.MicroAlgos" (struct)`: true,
+
+ // crypto.PublicKey is an array [32]byte in go-algorand, but is a byte-slice elsewhere.
+ `github.com/algorand/go-algorand/crypto :: "crypto.PublicKey" (array)`: true,
}
// --------------- TYPE TREE DATA STRUCTURES --------------- //
diff --git a/tools/x-repo-types/xrt_test.go b/tools/x-repo-types/xrt_test.go
index 119954e63..4360b432d 100644
--- a/tools/x-repo-types/xrt_test.go
+++ b/tools/x-repo-types/xrt_test.go
@@ -31,6 +31,8 @@ type testCase struct {
}
func TestCrossRepoTypes(t *testing.T) {
+ // NOTE: the heavy lifting is done by the first test case, so it's better to apply PartitionTest to the
+ // entire test as opposed to partitioning each test case.
partitiontest.PartitionTest(t)
testCases := []testCase{
@@ -64,6 +66,22 @@ func TestCrossRepoTypes(t *testing.T) {
skipReason: `Several issues. For example: LEVEL 5 of goal bookkeeping.Block is EvalDelta with field [SharedAccts](codec:"sa,allocbound=config.MaxEvalDeltaAccounts") VS SDK types.EvalDelta is missing SharedAccts field`,
},
{
+ name: "goal-v-sdk-eval-delta",
+ xPkg: "github.com/algorand/go-algorand/data/transactions",
+ xType: "EvalDelta",
+ yPkg: "github.com/algorand/go-algorand-sdk/v2/types",
+ yBranch: "develop",
+ yType: "EvalDelta",
+ },
+ {
+ name: "goal-v-sdk-consensus",
+ xPkg: "github.com/algorand/go-algorand/config",
+ xType: "ConsensusParams",
+ yPkg: "github.com/algorand/go-algorand-sdk/v2/protocol/config",
+ yBranch: "develop",
+ yType: "ConsensusParams",
+ },
+ {
name: "goal-v-sdk-blockheader",
xPkg: "github.com/algorand/go-algorand/data/bookkeeping",
xType: "BlockHeader",
diff --git a/util/bloom/bloom.go b/util/bloom/bloom.go
index eeffc88ef..483cabc6f 100644
--- a/util/bloom/bloom.go
+++ b/util/bloom/bloom.go
@@ -43,7 +43,7 @@ func New(sizeBits int, numHashes uint32, prefix uint32) *Filter {
// These parameters are optimal for small bloom filters as
// described in section 4.1 of this paper:
//
-// https://web.stanford.edu/~ashishg/papers/inverted.pdf
+// https://web.stanford.edu/~ashishg/papers/inverted.pdf
func Optimal(numElements int, falsePositiveRate float64) (sizeBits int, numHashes uint32) {
n := float64(numElements)
p := falsePositiveRate
diff --git a/util/execpool/pool.go b/util/execpool/pool.go
index 50acbf9a5..d44657206 100644
--- a/util/execpool/pool.go
+++ b/util/execpool/pool.go
@@ -107,7 +107,7 @@ func (p *pool) GetOwner() interface{} {
//
// Enqueue blocks until the task is enqueued correctly, or until the passed-in
// context is cancelled.
-///
+// /
// Enqueue returns nil if task was enqueued successfully or the result of the
// expired context error.
func (p *pool) Enqueue(enqueueCtx context.Context, t ExecFunc, arg interface{}, i Priority, out chan interface{}) error {
diff --git a/util/execpool/stream.go b/util/execpool/stream.go
index af2bb0809..1ac61cb27 100644
--- a/util/execpool/stream.go
+++ b/util/execpool/stream.go
@@ -112,8 +112,8 @@ func (sv *StreamToBatch) batchingLoop() {
// if no batchable items here, send this as a task of its own
if numberOfBatchable == 0 {
- err := sv.addBatchToThePoolNow([]InputJob{job})
- if err != nil {
+ addErr := sv.addBatchToThePoolNow([]InputJob{job})
+ if addErr != nil {
return
}
continue // job is handled, continue
@@ -129,8 +129,8 @@ func (sv *StreamToBatch) batchingLoop() {
// do not consider adding more jobs to this batch.
// bypass the exec pool situation and queue anyway
// this is to prevent creation of very large batches
- err := sv.addBatchToThePoolNow(uJobs)
- if err != nil {
+ addErr := sv.addBatchToThePoolNow(uJobs)
+ if addErr != nil {
return
}
added = true
diff --git a/util/metrics/counter.go b/util/metrics/counter.go
index bb8355cac..2efb52be5 100644
--- a/util/metrics/counter.go
+++ b/util/metrics/counter.go
@@ -114,6 +114,19 @@ func (counter *Counter) GetUint64Value() (x uint64) {
return atomic.LoadUint64(&counter.intValue)
}
+// GetUint64ValueForLabels returns the value of the counter for the given labels or 0 if it's not found.
+func (counter *Counter) GetUint64ValueForLabels(labels map[string]string) uint64 {
+ counter.Lock()
+ defer counter.Unlock()
+
+ labelIndex := counter.findLabelIndex(labels)
+ counterIdx, has := counter.valuesIndices[labelIndex]
+ if !has {
+ return 0
+ }
+ return counter.values[counterIdx].counter
+}
+
func (counter *Counter) fastAddUint64(x uint64) {
if atomic.AddUint64(&counter.intValue, x) == x {
// What we just added is the whole value, this
diff --git a/util/metrics/counter_test.go b/util/metrics/counter_test.go
index fe7d553e4..343f1f5fa 100644
--- a/util/metrics/counter_test.go
+++ b/util/metrics/counter_test.go
@@ -211,3 +211,23 @@ func TestGetValue(t *testing.T) {
c.Inc(nil)
require.Equal(t, uint64(2), c.GetUint64Value())
}
+
+func TestGetValueForLabels(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ c := MakeCounter(MetricName{Name: "testname", Description: "testhelp"})
+ c.Deregister(nil)
+
+ labels := map[string]string{"a": "b"}
+ require.Equal(t, uint64(0), c.GetUint64ValueForLabels(labels))
+ c.Inc(labels)
+ require.Equal(t, uint64(1), c.GetUint64ValueForLabels(labels))
+ c.Inc(labels)
+ require.Equal(t, uint64(2), c.GetUint64ValueForLabels(labels))
+ // confirm that the value is not shared between labels
+ c.Inc(nil)
+ require.Equal(t, uint64(2), c.GetUint64ValueForLabels(labels))
+ labels2 := map[string]string{"a": "c"}
+ c.Inc(labels2)
+ require.Equal(t, uint64(1), c.GetUint64ValueForLabels(labels2))
+}
diff --git a/util/metrics/metrics.go b/util/metrics/metrics.go
index f2437f267..bd68ff4c1 100644
--- a/util/metrics/metrics.go
+++ b/util/metrics/metrics.go
@@ -81,6 +81,8 @@ var (
LedgerRewardClaimsTotal = MetricName{Name: "algod_ledger_reward_claims_total", Description: "Total number of reward claims written to the ledger"}
// LedgerRound Last round written to ledger
LedgerRound = MetricName{Name: "algod_ledger_round", Description: "Last round written to ledger"}
+ // LedgerDBRound Last round written to ledger
+ LedgerDBRound = MetricName{Name: "algod_ledger_dbround", Description: "Last round written to the ledger DB"}
// AgreementMessagesHandled "Number of agreement messages handled"
AgreementMessagesHandled = MetricName{Name: "algod_agreement_handled", Description: "Number of agreement messages handled"}
diff --git a/util/metrics/tagcounter.go b/util/metrics/tagcounter.go
index 1daf30ba1..ff3b8a732 100644
--- a/util/metrics/tagcounter.go
+++ b/util/metrics/tagcounter.go
@@ -22,6 +22,7 @@ import (
"sync/atomic"
"github.com/algorand/go-deadlock"
+ "golang.org/x/exp/maps"
)
// NewTagCounterFiltered makes a set of metrics under rootName for tagged counting.
@@ -98,9 +99,7 @@ func (tc *TagCounter) Add(tag string, val uint64) {
// Still need to add a new tag.
// Make a new map so there's never any race.
newtags := make(map[string]*uint64, len(tc.tags)+1)
- for k, v := range tc.tags {
- newtags[k] = v
- }
+ maps.Copy(newtags, tc.tags)
var st []uint64
if len(tc.storage) > 0 {
st = tc.storage[len(tc.storage)-1]
diff --git a/util/process_windows.go b/util/process_windows.go
index 7fe14d974..263b997f5 100644
--- a/util/process_windows.go
+++ b/util/process_windows.go
@@ -166,8 +166,9 @@ func killProcess(pid int) error {
}
// NOTE: Unlike Unix, Windows tries to open the target process in order to kill it.
-// ERROR_INVALID_PARAMETER is returned if the process does not exists.
-// To mimic other OS behavior, if the process does not exist, don't return an error
+//
+// ERROR_INVALID_PARAMETER is returned if the process does not exists.
+// To mimic other OS behavior, if the process does not exist, don't return an error
func isInvalidParameterError(err error) bool {
var syscallError syscall.Errno
diff --git a/util/sleep_linux_32.go b/util/sleep_linux_32.go
index 8dd445e2a..249134932 100644
--- a/util/sleep_linux_32.go
+++ b/util/sleep_linux_32.go
@@ -31,5 +31,5 @@ func NanoSleep(d time.Duration) {
Nsec: int32(d.Nanoseconds() % time.Second.Nanoseconds()),
Sec: int32(d.Nanoseconds() / time.Second.Nanoseconds()),
}
- syscall.Nanosleep(timeSpec, nil) // nolint:errcheck // ignoring error
+ syscall.Nanosleep(timeSpec, nil) //nolint:errcheck // ignoring error
}
diff --git a/util/sleep_linux_64.go b/util/sleep_linux_64.go
index 0fff615e7..cb6a7574c 100644
--- a/util/sleep_linux_64.go
+++ b/util/sleep_linux_64.go
@@ -30,5 +30,5 @@ func NanoSleep(d time.Duration) {
Nsec: d.Nanoseconds() % time.Second.Nanoseconds(),
Sec: d.Nanoseconds() / time.Second.Nanoseconds(),
}
- syscall.Nanosleep(timeSpec, nil) // nolint:errcheck // ignoring error
+ syscall.Nanosleep(timeSpec, nil) //nolint:errcheck // ignoring error
}
diff --git a/util/tcpinfo_linux.go b/util/tcpinfo_linux.go
index 8cf1687ae..69fca11f9 100644
--- a/util/tcpinfo_linux.go
+++ b/util/tcpinfo_linux.go
@@ -55,6 +55,7 @@ func getConnTCPInfo(raw syscall.RawConn) (*TCPInfo, error) {
}
// linuxTCPInfo is based on linux include/uapi/linux/tcp.h struct tcp_info
+//
//revive:disable:var-naming
//nolint:structcheck // complains about unused fields that are rqeuired to match C tcp_info struct
type linuxTCPInfo struct {