author    John Lee <64482439+algojohnlee@users.noreply.github.com>  2023-06-12 11:33:15 -0400
committer GitHub <noreply@github.com>  2023-06-12 11:33:15 -0400
commit    17e2f623ff2004c0f701c01b8a8e7c6b2ec2d704 (patch)
tree      ac1fdacd02858e5d3973fb67a93c537bd021fe82
parent    5f2d5ede86af95246e0cba00f8103f6fc1692bd2 (diff)
parent    7f246dbd2e45fa8d9d6c96a297ed0f0230a67155 (diff)

Merge pull request #5465 from Algo-devops-service/relstable3.16.1 (v3.16.1-stable)
-rw-r--r--  .circleci/config.yml | 9
-rw-r--r--  .github/ISSUE_TEMPLATE/bug_report.md | 2
-rw-r--r--  .github/workflows/pr-type-category.yml | 4
-rw-r--r--  .github/workflows/reviewdog.yml | 1
-rw-r--r--  .gitignore | 6
-rw-r--r--  .golangci.yml | 10
-rw-r--r--  CODEOWNERS | 2
-rw-r--r--  CODE_OF_CONDUCT.md | 128
-rw-r--r--  CONTRIBUTING.md | 74
-rw-r--r--  Dockerfile | 6
-rw-r--r--  Makefile | 24
-rw-r--r--  README.md | 37
-rw-r--r--  agreement/README.md | 14
-rw-r--r--  agreement/abstractions.go | 6
-rw-r--r--  agreement/agreementtest/simulate.go | 2
-rw-r--r--  agreement/agreementtest/simulate_test.go | 2
-rw-r--r--  agreement/common_test.go | 2
-rw-r--r--  agreement/demux_test.go | 2
-rw-r--r--  agreement/fuzzer/ledger_test.go | 2
-rw-r--r--  agreement/selector.go | 2
-rw-r--r--  catchup/catchpointService.go | 4
-rw-r--r--  catchup/service.go | 40
-rw-r--r--  catchup/service_test.go | 2
-rw-r--r--  cmd/algocfg/profileCommand.go | 9
-rw-r--r--  cmd/algocfg/profileCommand_test.go | 48
-rw-r--r--  cmd/algod/main.go | 13
-rw-r--r--  cmd/catchpointdump/database.go | 55
-rw-r--r--  cmd/catchpointdump/file.go | 70
-rw-r--r--  cmd/catchpointdump/net.go | 4
-rw-r--r--  cmd/goal/README.md | 2
-rw-r--r--  cmd/goal/application.go | 6
-rw-r--r--  cmd/goal/clerk.go | 241
-rw-r--r--  cmd/goal/messages.go | 2
-rw-r--r--  cmd/goal/node.go | 6
-rw-r--r--  cmd/loadgenerator/main.go | 4
-rw-r--r--  cmd/opdoc/opdoc.go | 188
-rw-r--r--  cmd/pingpong/runCmd.go | 4
-rw-r--r--  cmd/tealdbg/localLedger.go | 4
-rw-r--r--  cmd/tealdbg/localLedger_test.go | 2
-rw-r--r--  cmd/tealdbg/local_test.go | 2
-rw-r--r--  cmd/updater/version_test.go | 9
-rw-r--r--  config/config.go | 2
-rw-r--r--  config/consensus.go | 66
-rw-r--r--  config/localTemplate.go | 4
-rw-r--r--  config/local_defaults.go | 1
-rw-r--r--  config/migrate.go | 3
-rw-r--r--  config/version.go | 4
-rw-r--r--  config/version_test.go | 60
-rw-r--r--  crypto/merklesignature/keysBuilder.go | 2
-rw-r--r--  crypto/merkletrie/cache.go | 26
-rw-r--r--  crypto/merkletrie/cache_test.go | 4
-rw-r--r--  crypto/merkletrie/committer.go | 2
-rw-r--r--  crypto/merkletrie/committer_test.go | 10
-rw-r--r--  crypto/merkletrie/node.go | 20
-rw-r--r--  crypto/merkletrie/node_test.go | 100
-rw-r--r--  crypto/merkletrie/trie.go | 50
-rw-r--r--  crypto/stateproof/msgp_gen.go | 677
-rw-r--r--  crypto/stateproof/msgp_gen_test.go | 120
-rw-r--r--  crypto/stateproof/prover.go (renamed from crypto/stateproof/builder.go) | 121
-rw-r--r--  crypto/stateproof/prover_test.go (renamed from crypto/stateproof/builder_test.go) | 56
-rw-r--r--  crypto/util.go | 7
-rw-r--r--  daemon/algod/api/algod.oas2.json | 442
-rw-r--r--  daemon/algod/api/algod.oas3.yml | 647
-rw-r--r--  daemon/algod/api/client/restClient.go | 155
-rw-r--r--  daemon/algod/api/server/common/handlers.go | 60
-rw-r--r--  daemon/algod/api/server/common/routes.go | 7
-rw-r--r--  daemon/algod/api/server/common/test/handlers_test.go | 88
-rw-r--r--  daemon/algod/api/server/common/test/helpers.go | 139
-rw-r--r--  daemon/algod/api/server/lib/common.go | 6
-rw-r--r--  daemon/algod/api/server/router.go | 6
-rw-r--r--  daemon/algod/api/server/v2/dryrun.go | 6
-rw-r--r--  daemon/algod/api/server/v2/dryrun_test.go | 12
-rw-r--r--  daemon/algod/api/server/v2/errors.go | 10
-rw-r--r--  daemon/algod/api/server/v2/generated/data/routes.go | 373
-rw-r--r--  daemon/algod/api/server/v2/generated/experimental/routes.go | 329
-rw-r--r--  daemon/algod/api/server/v2/generated/model/types.go | 118
-rw-r--r--  daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go | 340
-rw-r--r--  daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go | 606
-rw-r--r--  daemon/algod/api/server/v2/generated/participating/private/routes.go | 344
-rw-r--r--  daemon/algod/api/server/v2/generated/participating/public/routes.go | 361
-rw-r--r--  daemon/algod/api/server/v2/handlers.go | 212
-rw-r--r--  daemon/algod/api/server/v2/handlers_test.go | 33
-rw-r--r--  daemon/algod/api/server/v2/test/handlers_resources_test.go | 8
-rw-r--r--  daemon/algod/api/server/v2/test/handlers_test.go | 649
-rw-r--r--  daemon/algod/api/server/v2/test/helpers.go | 94
-rw-r--r--  daemon/algod/api/server/v2/utils.go | 89
-rw-r--r--  daemon/algod/server.go | 15
-rw-r--r--  data/account/participationRegistry.go | 4
-rw-r--r--  data/account/participation_test.go | 6
-rw-r--r--  data/accountManager.go | 5
-rw-r--r--  data/accountManager_test.go | 67
-rw-r--r--  data/basics/units.go | 5
-rw-r--r--  data/basics/units_test.go | 19
-rw-r--r--  data/bookkeeping/genesis.go | 6
-rw-r--r--  data/datatest/impls.go | 4
-rw-r--r--  data/ledger.go | 18
-rw-r--r--  data/ledger_test.go | 23
-rw-r--r--  data/pools/transactionPool.go | 18
-rw-r--r--  data/pools/transactionPool_test.go | 14
-rw-r--r--  data/transactions/application.go | 45
-rw-r--r--  data/transactions/application_test.go | 27
-rw-r--r--  data/transactions/logic/README.md | 147
-rw-r--r--  data/transactions/logic/README_in.md | 58
-rw-r--r--  data/transactions/logic/TEAL_opcodes.md | 730
-rw-r--r--  data/transactions/logic/assembler.go | 1034
-rw-r--r--  data/transactions/logic/assembler_test.go | 1083
-rw-r--r--  data/transactions/logic/backwardCompat_test.go | 34
-rw-r--r--  data/transactions/logic/box.go | 6
-rw-r--r--  data/transactions/logic/box_test.go | 256
-rw-r--r--  data/transactions/logic/debugger.go | 30
-rw-r--r--  data/transactions/logic/debugger_test.go | 18
-rw-r--r--  data/transactions/logic/doc.go | 187
-rw-r--r--  data/transactions/logic/doc_test.go | 46
-rw-r--r--  data/transactions/logic/eval.go | 1132
-rw-r--r--  data/transactions/logic/evalAppTxn_test.go | 629
-rw-r--r--  data/transactions/logic/evalStateful_test.go | 2102
-rw-r--r--  data/transactions/logic/eval_test.go | 253
-rw-r--r--  data/transactions/logic/export_test.go | 6
-rw-r--r--  data/transactions/logic/fields.go | 74
-rw-r--r--  data/transactions/logic/fields_test.go | 39
-rw-r--r--  data/transactions/logic/frames_test.go | 20
-rw-r--r--  data/transactions/logic/langspec.json | 2456
-rw-r--r--  data/transactions/logic/ledger_test.go | 34
-rw-r--r--  data/transactions/logic/mocktracer/scenarios.go | 313
-rw-r--r--  data/transactions/logic/mocktracer/tracer.go | 87
-rw-r--r--  data/transactions/logic/opcodes.go | 125
-rw-r--r--  data/transactions/logic/resources.go | 364
-rw-r--r--  data/transactions/logic/resources_test.go | 870
-rw-r--r--  data/transactions/logic/sourcemap.go | 2
-rw-r--r--  data/transactions/logic/tracer.go | 45
-rw-r--r--  data/transactions/msgp_gen.go | 294
-rw-r--r--  data/transactions/teal.go | 7
-rw-r--r--  data/transactions/transaction.go | 5
-rw-r--r--  data/transactions/transaction_test.go | 27
-rw-r--r--  data/transactions/verify/txn.go | 56
-rw-r--r--  data/transactions/verify/txnBatch.go | 17
-rw-r--r--  data/transactions/verify/txnBatch_test.go | 48
-rw-r--r--  data/transactions/verify/txn_test.go | 9
-rw-r--r--  data/txHandler.go | 49
-rw-r--r--  data/txHandler_test.go | 62
-rw-r--r--  data/txntest/txn.go | 7
-rw-r--r--  docker/README.md | 67
-rw-r--r--  docker/files/run/devmode_template.json | 3
-rw-r--r--  docker/files/run/followermode_template.json | 51
-rwxr-xr-x  docker/files/run/run.sh | 60
-rw-r--r--  docs/agreement_service.md | 4
-rw-r--r--  go.mod | 12
-rw-r--r--  go.sum | 25
-rw-r--r--  installer/config.json.example | 1
-rw-r--r--  ledger/acctdeltas.go | 213
-rw-r--r--  ledger/acctdeltas_test.go | 351
-rw-r--r--  ledger/acctonline.go | 198
-rw-r--r--  ledger/acctonline_expired_test.go | 689
-rw-r--r--  ledger/acctonline_test.go | 411
-rw-r--r--  ledger/acctupdates.go | 40
-rw-r--r--  ledger/acctupdates_test.go | 80
-rw-r--r--  ledger/applications_test.go | 30
-rw-r--r--  ledger/apply/application.go | 6
-rw-r--r--  ledger/apply/application_test.go | 4
-rw-r--r--  ledger/apply/apply.go | 2
-rw-r--r--  ledger/apply/stateproof.go | 43
-rw-r--r--  ledger/apply/stateproof_test.go | 275
-rw-r--r--  ledger/apptxn_test.go | 2018
-rw-r--r--  ledger/archival_test.go | 4
-rw-r--r--  ledger/boxtxn_test.go | 93
-rw-r--r--  ledger/bulletin.go | 7
-rw-r--r--  ledger/catchpointtracker.go | 133
-rw-r--r--  ledger/catchpointtracker_test.go | 389
-rw-r--r--  ledger/catchpointwriter.go | 49
-rw-r--r--  ledger/catchpointwriter_test.go | 232
-rw-r--r--  ledger/catchupaccessor.go | 94
-rw-r--r--  ledger/catchupaccessor_test.go | 125
-rw-r--r--  ledger/double_test.go | 43
-rw-r--r--  ledger/eval/appcow.go (renamed from ledger/internal/appcow.go) | 21
-rw-r--r--  ledger/eval/appcow_test.go (renamed from ledger/internal/appcow_test.go) | 6
-rw-r--r--  ledger/eval/applications.go (renamed from ledger/internal/applications.go) | 8
-rw-r--r--  ledger/eval/assetcow.go (renamed from ledger/internal/assetcow.go) | 2
-rw-r--r--  ledger/eval/cow.go (renamed from ledger/internal/cow.go) | 14
-rw-r--r--  ledger/eval/cow_creatables.go (renamed from ledger/internal/cow_creatables.go) | 6
-rw-r--r--  ledger/eval/cow_test.go (renamed from ledger/internal/cow_test.go) | 97
-rw-r--r--  ledger/eval/eval.go (renamed from ledger/internal/eval.go) | 62
-rw-r--r--  ledger/eval/eval_test.go (renamed from ledger/internal/eval_test.go) | 318
-rw-r--r--  ledger/eval/evalindexer.go (renamed from ledger/internal/evalindexer.go) | 2
-rw-r--r--  ledger/eval/prefetcher/error.go (renamed from ledger/internal/prefetcher/error.go) | 0
-rw-r--r--  ledger/eval/prefetcher/prefetcher.go (renamed from ledger/internal/prefetcher/prefetcher.go) | 0
-rw-r--r--  ledger/eval/prefetcher/prefetcher_alignment_test.go (renamed from ledger/internal/prefetcher/prefetcher_alignment_test.go) | 25
-rw-r--r--  ledger/eval/prefetcher/prefetcher_test.go (renamed from ledger/internal/prefetcher/prefetcher_test.go) | 2
-rw-r--r--  ledger/eval/prefetcher/prefetcher_whitebox_test.go (renamed from ledger/internal/prefetcher/prefetcher_whitebox_test.go) | 0
-rw-r--r--  ledger/eval/txntracer.go | 185
-rw-r--r--  ledger/eval/txntracer_test.go | 372
-rw-r--r--  ledger/eval_simple_test.go | 209
-rw-r--r--  ledger/evalbench_test.go | 10
-rw-r--r--  ledger/evalindexer.go | 11
-rw-r--r--  ledger/fullblock_perf_test.go | 14
-rw-r--r--  ledger/ledger.go | 100
-rw-r--r--  ledger/ledger_perf_test.go | 6
-rw-r--r--  ledger/ledger_test.go | 496
-rw-r--r--  ledger/ledgercore/catchpointlabel.go | 89
-rw-r--r--  ledger/ledgercore/catchpointlabel_test.go | 30
-rw-r--r--  ledger/ledgercore/error.go | 15
-rw-r--r--  ledger/ledgercore/msgp_gen.go | 183
-rw-r--r--  ledger/ledgercore/msgp_gen_test.go | 60
-rw-r--r--  ledger/ledgercore/statedelta.go | 109
-rw-r--r--  ledger/ledgercore/statedelta_test.go | 338
-rw-r--r--  ledger/ledgercore/stateproofverification.go | 51
-rw-r--r--  ledger/ledgercore/votersForRound.go | 13
-rw-r--r--  ledger/metrics.go | 2
-rw-r--r--  ledger/msgp_gen.go | 167
-rw-r--r--  ledger/msgp_gen_test.go | 60
-rw-r--r--  ledger/notifier.go | 2
-rw-r--r--  ledger/simple_test.go | 62
-rw-r--r--  ledger/simulation/simulation_eval_test.go | 1947
-rw-r--r--  ledger/simulation/simulator.go | 99
-rw-r--r--  ledger/simulation/simulator_test.go | 108
-rw-r--r--  ledger/simulation/testing/utils.go | 70
-rw-r--r--  ledger/simulation/trace.go | 90
-rw-r--r--  ledger/simulation/tracer.go | 34
-rw-r--r--  ledger/simulation/tracer_test.go | 5
-rw-r--r--  ledger/spverificationtracker.go | 313
-rw-r--r--  ledger/spverificationtracker_test.go | 492
-rw-r--r--  ledger/store/merkle_committer.go | 75
-rw-r--r--  ledger/store/trackerdb/catchpoint.go | 5
-rw-r--r--  ledger/store/trackerdb/interface.go | 24
-rw-r--r--  ledger/store/trackerdb/msgp_gen.go | 33
-rw-r--r--  ledger/store/trackerdb/sqlitedriver/accountsV2.go | 51
-rw-r--r--  ledger/store/trackerdb/sqlitedriver/catchpoint.go | 4
-rw-r--r--  ledger/store/trackerdb/sqlitedriver/schema.go | 27
-rw-r--r--  ledger/store/trackerdb/sqlitedriver/spVerificationAccessor.go | 164
-rw-r--r--  ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go | 12
-rw-r--r--  ledger/store/trackerdb/sqlitedriver/testing.go | 6
-rw-r--r--  ledger/store/trackerdb/sqlitedriver/trackerdbV2.go | 29
-rw-r--r--  ledger/store/trackerdb/store.go | 4
-rw-r--r--  ledger/store/trackerdb/version.go | 2
-rw-r--r--  ledger/testing/consensusRange.go | 8
-rw-r--r--  ledger/testing/consensusRange_test.go | 2
-rw-r--r--  ledger/testing/initState.go | 35
-rw-r--r--  ledger/testing/testGenesis.go | 26
-rw-r--r--  ledger/tracker.go | 83
-rw-r--r--  ledger/tracker_test.go | 4
-rw-r--r--  ledger/txtail.go | 2
-rw-r--r--  ledger/txtail_test.go | 2
-rw-r--r--  ledger/voters.go | 130
-rw-r--r--  ledger/voters_test.go | 101
-rw-r--r--  libgoal/libgoal.go | 31
-rw-r--r--  libgoal/teal.go | 7
-rw-r--r--  logging/telemetryspec/event.go | 12
-rw-r--r--  logging/telemetryspec/metric.go | 2
-rw-r--r--  netdeploy/remote/deployedNetwork.go | 2
-rw-r--r--  netdeploy/remote/nodecfg/nodeConfigurator.go | 31
-rw-r--r--  network/wsNetwork.go | 19
-rw-r--r--  network/wsNetwork_test.go | 234
-rw-r--r--  node/error.go | 21
-rw-r--r--  node/follower_node.go | 33
-rw-r--r--  node/follower_node_test.go | 75
-rw-r--r--  node/node.go | 80
-rw-r--r--  nodecontrol/algodControl.go | 8
-rw-r--r--  protocol/consensus.go | 20
-rw-r--r--  protocol/hash.go | 1
-rw-r--r--  scripts/algorand_node_log.json | 58
-rwxr-xr-x  scripts/build_package.sh | 2
-rwxr-xr-x  scripts/configure_dev.sh | 3
-rwxr-xr-x  scripts/dump_genesis.sh | 3
-rwxr-xr-x  scripts/get_golang_version.sh | 2
-rw-r--r--  scripts/release/mule/Makefile.mule | 2
-rw-r--r-- [-rwxr-xr-x]  scripts/travis/codecov | 23
-rwxr-xr-x  scripts/travis/codegen_verification.sh | 11
-rwxr-xr-x  scripts/travis/upload_coverage.sh | 14
-rwxr-xr-x  scripts/upload_config.sh | 5
-rw-r--r--  stateproof/abstractions.go | 2
-rw-r--r--  stateproof/builder.go | 538
-rw-r--r--  stateproof/db.go | 184
-rw-r--r--  stateproof/db_test.go | 195
-rw-r--r--  stateproof/msgp_gen.go | 298
-rw-r--r--  stateproof/msgp_gen_test.go | 60
-rw-r--r--  stateproof/signer.go | 122
-rw-r--r--  stateproof/stateproofMessageGenerator.go | 8
-rw-r--r--  stateproof/stateproofMessageGenerator_test.go | 324
-rw-r--r--  stateproof/verify/stateproof.go | 68
-rw-r--r--  stateproof/verify/stateproof_test.go | 74
-rw-r--r--  stateproof/worker.go | 126
-rw-r--r--  stateproof/worker_test.go | 1311
-rwxr-xr-x [-rw-r--r--]  test/e2e-go/cli/goal/expect/goalAccountTest.exp | 10
-rw-r--r--  test/e2e-go/cli/goal/expect/goalExpectCommon.exp | 40
-rw-r--r--  test/e2e-go/features/accountPerf/sixMillion_test.go | 7
-rw-r--r--  test/e2e-go/features/catchup/basicCatchup_test.go | 25
-rw-r--r--  test/e2e-go/features/catchup/catchpointCatchup_test.go | 591
-rw-r--r--  test/e2e-go/features/catchup/stateproofsCatchup_test.go | 286
-rw-r--r--  test/e2e-go/features/devmode/devmode_test.go | 32
-rw-r--r--  test/e2e-go/features/followerNode/syncDeltas_test.go | 4
-rw-r--r--  test/e2e-go/features/participation/onlineOfflineParticipation_test.go | 4
-rw-r--r--  test/e2e-go/features/participation/overlappingParticipationKeys_test.go | 4
-rw-r--r--  test/e2e-go/features/stateproofs/stateproofs_test.go | 138
-rw-r--r--  test/e2e-go/features/teal/compile_test.go | 12
-rw-r--r--  test/e2e-go/features/transactions/asset_test.go | 2
-rw-r--r--  test/e2e-go/restAPI/restClient_test.go | 431
-rw-r--r--  test/framework/fixtures/restClientFixture.go | 4
-rwxr-xr-x  test/scripts/e2e_subs/access-previous-scratch.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/assets-app-b.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/assets-app.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/box-search.sh | 12
-rwxr-xr-x  test/scripts/e2e_subs/e2e-app-abi-arg.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/e2e-app-abi-method.sh | 4
-rwxr-xr-x  test/scripts/e2e_subs/e2e-app-bootloader.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/e2e-app-delete-clear.sh | 4
-rwxr-xr-x  test/scripts/e2e_subs/e2e-app-extra-pages.sh | 10
-rwxr-xr-x  test/scripts/e2e_subs/e2e-app-simulate.sh | 386
-rwxr-xr-x  test/scripts/e2e_subs/e2e-app-stateful-global.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/e2e-app-stateful-local.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/e2e-app-x-app-reads.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/goal-app-create-state-defaults.sh | 64
-rwxr-xr-x  test/scripts/e2e_subs/sectok-app.sh | 2
-rwxr-xr-x  test/scripts/e2e_subs/shared-resources.py | 97
-rwxr-xr-x  test/scripts/e2e_subs/teal-creatable-id.sh | 4
-rw-r--r--  test/scripts/e2e_subs/tealprogs/logs-a-lot.teal | 141
-rw-r--r--  test/testdata/consensus/catchpointtestingprotocol.json | 1
-rw-r--r--  test/testdata/deployednettemplates/recipes/custom/configs/nonPartNode.json | 2
-rw-r--r--  test/testdata/deployednettemplates/recipes/custom/configs/relay.json | 2
-rw-r--r--  test/testdata/nettemplates/RichAccountStateProof.json | 2
-rw-r--r--  test/testdata/nettemplates/StateProof.json | 2
-rw-r--r--  test/testdata/nettemplates/ThreeNodesWithRichAcct.json | 48
-rw-r--r--  tools/README.md | 12
-rw-r--r--  tools/block-generator/README.md | 161
-rw-r--r--  tools/block-generator/core/commands.go | 35
-rw-r--r--  tools/block-generator/generator/daemon.go | 52
-rw-r--r--  tools/block-generator/generator/generate.go | 870
-rw-r--r--  tools/block-generator/generator/generate_test.go | 364
-rw-r--r--  tools/block-generator/generator/make_transactions.go | 97
-rw-r--r--  tools/block-generator/generator/server.go | 169
-rw-r--r--  tools/block-generator/generator/server_test.go | 129
-rw-r--r--  tools/block-generator/generator/utils.go | 81
-rw-r--r--  tools/block-generator/generator/utils_test.go | 132
-rw-r--r--  tools/block-generator/go.mod | 50
-rw-r--r--  tools/block-generator/go.sum | 948
-rw-r--r--  tools/block-generator/main.go | 26
-rw-r--r--  tools/block-generator/requirements.txt | 1
-rwxr-xr-x  tools/block-generator/run_postgres.sh | 49
-rwxr-xr-x  tools/block-generator/run_runner.sh | 60
-rwxr-xr-x  tools/block-generator/run_tests.sh | 97
-rw-r--r--  tools/block-generator/runner/metrics_collector.go | 113
-rw-r--r--  tools/block-generator/runner/run.go | 463
-rw-r--r--  tools/block-generator/runner/runner.go | 63
-rw-r--r--  tools/block-generator/runner/template/conduit.yml.tmpl | 61
-rw-r--r--  tools/block-generator/scenarios/config.asset.close.yml | 16
-rw-r--r--  tools/block-generator/scenarios/config.asset.destroy.yml | 16
-rw-r--r--  tools/block-generator/scenarios/config.asset.xfer.yml | 15
-rw-r--r--  tools/block-generator/scenarios/config.mixed.jumbo.yml | 19
-rw-r--r--  tools/block-generator/scenarios/config.mixed.yml | 19
-rw-r--r--  tools/block-generator/scenarios/config.payment.full.yml | 14
-rw-r--r--  tools/block-generator/scenarios/config.payment.jumbo.yml | 14
-rw-r--r--  tools/block-generator/scenarios/config.payment.small.yml | 14
-rw-r--r--  tools/block-generator/test_config.yml | 23
-rw-r--r--  tools/block-generator/upload_metrics.py | 53
-rw-r--r--  tools/block-generator/util/util.go | 78
-rw-r--r--  tools/debug/chopper/main.go | 230
-rw-r--r--  tools/debug/dumpblocks/main.go | 2
-rwxr-xr-x  tools/debug/jslog | 2
-rw-r--r--  tools/x-repo-types/Makefile | 58
-rw-r--r--  tools/x-repo-types/README.md | 52
-rw-r--r--  tools/x-repo-types/go.mod | 20
-rw-r--r--  tools/x-repo-types/go.sum | 29
-rw-r--r--  tools/x-repo-types/typeAnalyzer/main.go | 85
-rw-r--r--  tools/x-repo-types/typeAnalyzer/main.tmpl | 79
-rw-r--r--  tools/x-repo-types/typeAnalyzer/typeAnalyzer.go | 580
-rw-r--r--  tools/x-repo-types/typeAnalyzer/typeAnalyzer_test.go | 449
-rw-r--r--  tools/x-repo-types/xrt.go | 333
-rw-r--r--  tools/x-repo-types/xrt_test.go | 103
-rw-r--r--  util/execpool/stream.go (renamed from data/transactions/verify/streamverifier.go) | 9
-rw-r--r--  util/execpool/stream_test.go | 442
-rw-r--r--  util/rateLimit.go | 20
-rw-r--r--  util/rateLimit_test.go | 24
-rw-r--r--  util/s3/s3Helper.go | 22
-rw-r--r--  util/s3/s3Helper_test.go | 74
372 files changed, 36555 insertions(+), 10210 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index d98d70c1d..34e111cad 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -22,6 +22,11 @@ parameters:
valid_nightly_branch:
type: string
default: /hotfix\/.*/
+ # The following is intentional - hardcoding a token for public repos
+ # is recommended here to allow fork access
+ codecov:
+ type: string
+ default: "8b4a1f91-f154-4c26-b84c-c9aaa90159c6"
executors:
amd64_medium:
@@ -239,6 +244,8 @@ jobs:
executor: << parameters.platform >>_medium
working_directory: << pipeline.parameters.build_dir >>/project
parallelism: 32
+ environment:
+ CODECOV_TOKEN: << pipeline.parameters.codecov >>
steps:
- generic_build
- generic_test:
@@ -254,6 +261,8 @@ jobs:
executor: << parameters.platform >>_large
working_directory: << pipeline.parameters.build_dir >>/project
parallelism: 4
+ environment:
+ CODECOV_TOKEN: << pipeline.parameters.codecov >>
steps:
- generic_build
- generic_test:
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 880031609..edf673f01 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -2,7 +2,7 @@
name: "\U0001F41C Bug report"
about: Report a reproducible bug.
title: ''
-labels: new-bug
+labels: bug
assignees: ''
---
diff --git a/.github/workflows/pr-type-category.yml b/.github/workflows/pr-type-category.yml
index 8dd4cfcd2..478b0b90e 100644
--- a/.github/workflows/pr-type-category.yml
+++ b/.github/workflows/pr-type-category.yml
@@ -17,8 +17,10 @@ jobs:
labels: "New Feature, Enhancement, Bug-Fix, Not-Yet-Enabled, Skip-Release-Notes"
- name: "Checking for PR Category in PR title. Should be like '<category>: <pr title>'."
+ env:
+ PR_TITLE: ${{ github.event.pull_request.title }}
run: |
- if [[ ! "${{ github.event.pull_request.title }}" =~ ^.{2,}\:.{2,} ]]; then
+ if [[ ! "$PR_TITLE" =~ ^.{2,}\:.{2,} ]]; then
echo "## PR Category is missing from PR title. Please add it like '<category>: <pr title>'." >> GITHUB_STEP_SUMMARY
exit 1
fi
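The hunk above routes the PR title through an `env:` block instead of interpolating `${{ github.event.pull_request.title }}` directly into the script: the expression form expands before the shell parses the line, so a crafted title could inject commands, whereas an environment variable arrives as inert data. The same principle applies when shelling out from Go; a minimal sketch of it (hypothetical program, not code from this repo):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"regexp"
)

// categoryRe mirrors the workflow's check for a "<category>: <pr title>" shape.
var categoryRe = regexp.MustCompile(`^.{2,}:.{2,}`)

func main() {
	title := os.Getenv("PR_TITLE") // untrusted input arrives as data, not code

	// Validate in-process rather than splicing the title into a shell string.
	if !categoryRe.MatchString(title) {
		fmt.Println("PR Category is missing from PR title.")
		os.Exit(1)
	}

	// If a subprocess is unavoidable, pass the value as an argument; exec.Command
	// never routes its arguments through a shell, so no injection is possible.
	cmd := exec.Command("echo", title)
	cmd.Stdout = os.Stdout
	_ = cmd.Run()
}
```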
diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
index ec1a85943..fcba97537 100644
--- a/.github/workflows/reviewdog.yml
+++ b/.github/workflows/reviewdog.yml
@@ -21,6 +21,7 @@ jobs:
- name: reviewdog-golangci-lint
uses: reviewdog/action-golangci-lint@v2
with:
+ go_version_file: go.mod
golangci_lint_version: "v1.47.3"
golangci_lint_flags: "-c .golangci.yml --allow-parallel-runners"
reporter: "github-pr-check"
diff --git a/.gitignore b/.gitignore
index 978115444..7c774b834 100644
--- a/.gitignore
+++ b/.gitignore
@@ -68,3 +68,9 @@ index.html
# test summary
testresults.json
+
+# block generator binary
+tools/block-generator/block-generator
+
+# cross repo types tool binary
+tools/x-repo-types/x-repo-types
diff --git a/.golangci.yml b/.golangci.yml
index ff7892d4f..40299509b 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -13,6 +13,7 @@ linters:
- govet
- ineffassign
- misspell
+ - nilerr
- nolintlint
- revive
- staticcheck
@@ -30,11 +31,6 @@ linters-settings:
require-explanation: true
errcheck:
exclude-functions:
- # data/transactions/logic/assembler.go uses ops.error, warn, to append log messages: OK to ignore for this case
- - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).errorf
- - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).error
- - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).warnf
- - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).warn
# We do this 121 times and never check the error.
- (*github.com/spf13/cobra.Command).MarkFlagRequired
govet:
@@ -61,9 +57,6 @@ linters-settings:
- (github.com/algorand/go-algorand/logging.Logger).Error
- (github.com/algorand/go-algorand/logging.Logger).Fatal
- (github.com/algorand/go-algorand/logging.Logger).Panic
- - (github.com/algorand/go-algorand/data/transactions/logic.OpStream).warnf
- - (github.com/algorand/go-algorand/data/transactions/logic.OpStream).errorf
- - (github.com/algorand/go-algorand/data/transactions/logic.OpStream).lineErrorf
- (github.com/algorand/go-algorand/cmd/goal/main).reportInfof
- (github.com/algorand/go-algorand/cmd/goal/main).reportInfoln
- (github.com/algorand/go-algorand/cmd/goal/main).reportWarnf
@@ -114,6 +107,7 @@ issues:
# - govet
- ineffassign
- misspell
+ # - nilerr
- nolintlint
# - revive
- staticcheck
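The newly enabled `nilerr` linter reports code paths that observe a non-nil error and then return a nil error anyway, silently swallowing the failure. A hypothetical illustration of the pattern it flags (not code from this repo):

```go
package example

import "os"

// loadConfig shows the bug class nilerr reports: err is known to be non-nil
// inside the branch, yet the function returns a nil error, hiding the
// failure from callers.
func loadConfig(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, nil // nilerr: returns nil error although err is non-nil
	}
	return data, nil
}
```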
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 000000000..3c88c6e71
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1,2 @@
+.github/ @algorand/devops
+.circleci/ @algorand/devops
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..966aa2cf9
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+conduct@algorand.com.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 04f8c9508..461f9eebe 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,58 +2,62 @@
If you are interested in contributing to the project, we welcome and thank you. We want to make the best decentralized and effective blockchain platform available and we appreciate your willingness to help us.
-The [Algorand GitHub Organization](https://github.com/algorand) has all of our open source projects, and dependencies which we fork and use in those projects. This contribution guide applies to all of these.
+The [Algorand GitHub Organization](https://github.com/algorand) has all of our open source projects, and dependencies which we fork and use in those projects. While technical details in this document are specific to `go-algorand`, the general ideas are applicable to all of our projects.
-Some of our most active projects include:
-* [go-algorand](https://github.com/algorand/go-algorand) - Algorand node software (this repository)
-* [go-algorand-sdk](https://github.com/algorand/go-algorand-sdk) - Golang SDK
-* [java-algorand-sdk](https://github.com/algorand/java-algorand-sdk) - Java SDK
-* [js-algorand-sdk](https://github.com/algorand/js-algorand-sdk) - JavaScript SDK
-* [indexer](https://github.com/algorand/indexer) - Blockchain analytics database
-* [ledger-app-algorand](https://github.com/algorand/ledger-app-algorand) - Ledger hardware wallet application
-* [mule](https://github.com/algorand/mule) - Continuous Integration automation tool
-* [py-algorand-sdk](https://github.com/algorand/py-algorand-sdk) - Python SDK
-* [sandbox](https://github.com/algorand/sandbox) - Algorand node quickstart tool
+## Non-code Contributions
-## Filing Issues
+While contributions come in many forms, this document is focused on code. For other types of involvement, see the following:
+* [Reporting issues and feature requests.][go-algorand-issues]
+* [Security vulnerability disclosures.][security-disclosure]
+* [Documentation improvements.][algorand-docs]
-Did you discover a bug? Do you have a feature request? Filing issues is an easy way anyone can contribute and helps us improve Algorand. We use GitHub Issues to track all known bugs and feature requests.
+## Contribution Model
-Before logging an issue be sure to check current issues, verify that your [node is synced](https://developer.algorand.org/docs/introduction-installing-node#sync-node), check the [Developer Frequently Asked Questions](https://developer.algorand.org/docs/developer-faq) and [GitHub issues][issues_url] to see if your issue is described there.
+All changes to `go-algorand` are made through the same process: a pull request targeting the `master` branch. This goes for internal and external contributions. To familiarize yourself with the process we recommend that you review the current open pull requests, and the GitHub documentation for [creating a pull request from a fork][gh-pr-process].
-If you’d like to contribute to any of the repositories, please file a [GitHub issue][issues_url] using the issues menu item. Make sure to specify whether you are describing a bug or a new enhancement using the **Bug report** or **Feature request** button.
+Note: some of our other projects are using gitflow, for these the process is the same but you will target pull requests against the `develop` branch.
-See the GitHub help guide for more information on [filing an issue](https://help.github.com/en/articles/creating-an-issue).
+## Communication Channels
-## Security / Vulnerabilities
+The core development team monitors the Algorand [discord community](https://discord.gg/algorand) and regularly responds to questions and suggestions. For very technical questions and implementation discussions GitHub Issues and Pull Requests are a good way to reach maintainers.
-Please refer to our [SECURITY](SECURITY.md) document.
+## Pull Requests
-If you have any questions, don't hesitate to contact us at security@algorand.com.
+All changes are made via pull requests.
-## Contribution Model
+Small changes are easier to review and merge than large ones, so the more focused a PR the better. If a feature requires refactoring, the refactoring should be a separate PR. If refactoring uncovers a bug, the fix should be a separate PR. These are not strict rules, but generally speaking, they make things easier to review which speeds up the PR process.
-For each of our repositories we use the same model for contributing code. Developers wanting to contribute must create pull requests. This process is described in the GitHub [Creating a pull request from a fork](https://help.github.com/en/articles/creating-a-pull-request-from-a-fork) documentation. Each pull request should be initiated against the `master` branch in the Algorand repository. After a pull request is submitted the core development team will review the submission and communicate with the developer using the comments sections of the PR. After the submission is reviewed and approved, it will be merged into the `master` branch of the source. These changes will be merged to our release branch on the next viable release date. For the SDKs, this may be immediate. Changes to the node software may take more time as we must ensure and verify the security, as well as apply protocol upgrades in an orderly way.
+### General Guidelines
-Note: some of our projects are using gitflow, for these you will open pull requests against the `develop` branch.
+* Have a clear well-formatted description in the pull request. This helps reviewers and later serves as documentation in the release notes.
+* Code must adhere to the [Go formatting guidelines](https://golang.org/doc/effective_go.html).
+* All tests must be passing.
+* New unit and integration tests should be added to ensure correctness and prevent regressions where appropriate.
+* Run linting and code formatting tools, see [the README](README.md) for details.
+* All CI checks should pass.
+* Use draft mode for PRs that are still in progress.
-Again, if you have a patch for a critical security vulnerability, please use our [vulnerability disclosure form][vuln_url] instead of creating a PR. We'll follow up with you on distributing the patch before we merge it.
+### Peer Review
-## Code Guidelines
+This is the single most important part of introducing new code to `go-algorand`.
-For Go code we use the [Golang guidelines defined here](https://golang.org/doc/effective_go.html).
-* Code must adhere to the official Go formatting guidelines (i.e. uses gofmt).
-* We use **gofmt** and **golangci-lint**. Also make sure to run `make sanity` and `make generate` before opening a pull request.
-* Code must be documented adhering to the official Go commentary guidelines.
+#### Concept Review
-For JavaScript code we use the [MDN formatting rules](https://developer.mozilla.org/en-US/docs/MDN/Contribute/Guidelines/Code_guidelines/JavaScript).
+Because code reviews are a considerable time commitment, the first step for peer review is convincing reviewers that it is worth their time. Typically this is done by keeping changes small, writing a thorough description to clearly explain the need for a given improvement, or discussing larger changes ahead of time through one of the communication channels.
-For Java code we use [Oracle’s standard formatting rules for Java](https://www.oracle.com/technetwork/java/codeconventions-150003.pdf).
+If reviewers are not convinced about the merits of a change, they may reject a PR instead of reviewing it. All rejections should include the rationale for how that decision was reached. It is not uncommon for this to occur. Some users opt to maintain long running forks to add features which are not suitable for the upstream repo at this time.
-## Communication Channels
+#### Code Review
+
+Reviewers will leave feedback directly on the pull request, typically inline with the code. This is an opportunity to discuss the changes. If a PR is left open with unresolved feedback it may eventually be closed.
+
+The project maintainers are responsible for the code in `go-algorand`, so ultimately whether or not a pull request is merged depends on their involvement.
+
+#### Merge
-The core development team monitors the Algorand community forums and regularly responds to questions and suggestions. Issues and Pull Requests are handled on GitHub.
+All changes are subject to a minimum of two reviews from subject matter experts prior to merge. Once this approval is reached a small number of committers are responsible for merging the changes. The list of committers is limited for practical and security reasons.
-[issues_url]: https://github.com/algorand/go-algorand/issues
-[vuln_url]: https://www.algorand.com/resources/blog/security
-[bug_bounty_url]: https://bugcrowd.com/algorand
+[gh-pr-process]: https://help.github.com/en/articles/creating-a-pull-request-from-a-fork
+[go-algorand-issues]: https://github.com/algorand/go-algorand/issues/new/choose
+[security-disclosure]: https://github.com/algorand/go-algorand/security/policy
+[algorand-docs]: https://github.com/algorand/docs/blob/staging/CONTRIBUTING.md
diff --git a/Dockerfile b/Dockerfile
index aaa7b6c3b..859e602d4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -49,12 +49,10 @@ ENV PATH="/node/bin:${PATH}" ALGOD_PORT="8080" KMD_PORT="7833" ALGORAND_DATA="/a
RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl && \
rm -rf /var/lib/apt/lists/* && \
mkdir -p "$ALGORAND_DATA" && \
- groupadd --system algorand && \
- useradd --no-log-init --create-home --system --gid algorand algorand && \
+ groupadd --gid=999 --system algorand && \
+ useradd --uid=999 --no-log-init --create-home --system --gid algorand algorand && \
chown -R algorand:algorand /algod
-USER algorand
-
COPY --chown=algorand:algorand --from=builder "/dist/bin/" "/node/bin/"
COPY --chown=algorand:algorand --from=builder "/dist/files/run/" "/node/run/"
diff --git a/Makefile b/Makefile
index d17855e6d..450ab6999 100644
--- a/Makefile
+++ b/Makefile
@@ -13,6 +13,13 @@ ARCH := $(shell ./scripts/archtype.sh)
OS_TYPE := $(shell ./scripts/ostype.sh)
S3_RELEASE_BUCKET = $$S3_RELEASE_BUCKET
+GOLANG_VERSIONS := $(shell ./scripts/get_golang_version.sh all)
+GOLANG_VERSION_BUILD := $(firstword $(GOLANG_VERSIONS))
+GOLANG_VERSION_SUPPORT := $(lastword $(GOLANG_VERSIONS))
+GOLANG_VERSION_BUILD_MAJOR := $(shell echo $(GOLANG_VERSION_BUILD) | cut -d'.' -f1,2)
+CURRENT_GO_VERSION := $(shell go version | cut -d " " -f 3 | tr -d 'go')
+CURRENT_GO_VERSION_MAJOR := $(shell echo $(CURRENT_GO_VERSION) | cut -d'.' -f1,2)
+
# If build number already set, use it - to ensure same build number across multiple platforms being built
BUILDNUMBER ?= $(shell ./scripts/compute_build_number.sh)
FULLBUILDNUMBER ?= $(shell ./scripts/compute_build_number.sh -f)
@@ -101,10 +108,19 @@ fix: build
lint: deps
$(GOPATH1)/bin/golangci-lint run -c .golangci.yml
+check_go_version:
+ @if [ $(CURRENT_GO_VERSION_MAJOR) != $(GOLANG_VERSION_BUILD_MAJOR) ]; then \
+ echo "Wrong major version of Go installed ($(CURRENT_GO_VERSION_MAJOR)). Please use $(GOLANG_VERSION_BUILD_MAJOR)"; \
+ exit 1; \
+ fi
+
+tidy: check_go_version
+ go mod tidy -compat=$(GOLANG_VERSION_SUPPORT)
+
check_shell:
find . -type f -name "*.sh" -exec shellcheck {} +
-sanity: fix lint fmt
+sanity: fix lint fmt tidy
cover:
go test $(GOTAGS) -coverprofile=cover.out $(UNIT_TEST_SOURCES)
@@ -186,7 +202,7 @@ rebuild_kmd_swagger: deps
# develop
-build: buildsrc
+build: buildsrc buildsrc-special
# We're making an empty file in the go-cache dir to
# get around a bug in go build where it will fail
@@ -197,6 +213,10 @@ buildsrc: check-go-version crypto/libs/$(OS_TYPE)/$(ARCH)/lib/libsodium.a node_e
touch "${GOCACHE}"/file.txt && \
go install $(GOTRIMPATH) $(GOTAGS) $(GOBUILDMODE) -ldflags="$(GOLDFLAGS)" ./...
+buildsrc-special:
+ cd tools/block-generator && \
+ go install $(GOTRIMPATH) $(GOTAGS) $(GOBUILDMODE) -ldflags="$(GOLDFLAGS)" ./...
+
check-go-version:
./scripts/check_golang_version.sh build
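The new `check_go_version` target compares only the major.minor component (`cut -d'.' -f1,2` keeps the first two dot-separated fields), so 1.20.3 against 1.20.5 passes while 1.19 against 1.20 fails. A small Go sketch of the same comparison, using hypothetical version values:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// majorMinor trims a Go version such as "1.20.5" down to "1.20",
// matching the Makefile's `cut -d'.' -f1,2`.
func majorMinor(v string) string {
	parts := strings.SplitN(v, ".", 3)
	if len(parts) < 2 {
		return v
	}
	return parts[0] + "." + parts[1]
}

func main() {
	current, required := "1.20.5", "1.20.3" // hypothetical values
	if majorMinor(current) != majorMinor(required) {
		fmt.Printf("Wrong major version of Go installed (%s). Please use %s\n",
			majorMinor(current), majorMinor(required))
		os.Exit(1)
	}
	fmt.Println("Go version OK")
}
```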
diff --git a/README.md b/README.md
index 9e787d6d4..b8e3f8207 100644
--- a/README.md
+++ b/README.md
@@ -1,26 +1,26 @@
| rel/stable <br> [![CircleCI](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fstable.svg?style=svg)](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fstable) | rel/beta <br> [![CircleCI](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fbeta.svg?style=svg)](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fbeta) | rel/nightly <br> [![CircleCI](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fnightly.svg?style=svg)](https://circleci.com/gh/algorand/go-algorand/tree/rel%2Fnightly) |
| --- | --- | --- |
-go-algorand
-====================
+# go-algorand
+
Algorand's official implementation in Go.
Algorand is a permissionless, pure proof-of-stake blockchain that delivers
decentralization, scalability, security, and transaction finality.
-## Getting Started ##
+## Getting Started
Our [developer website][developer site url] has the most up to date information
about using and installing the Algorand platform.
-## Building from source ##
+## Building from source
Development is done using the [Go Programming Language](https://golang.org/).
The version of go is specified in the project's [go.mod](go.mod) file. This document assumes that you have a functioning
environment setup. If you need assistance setting up an environment please visit
the [official Go documentation website](https://golang.org/doc/).
-### Linux / OSX ###
+### Linux / OSX
We currently strive to support Debian-based distributions with Ubuntu 18.04
being our official release target.
@@ -89,17 +89,18 @@ ${GOPATH}/bin/goal node start -d ~/testnet_data
Genesis files for mainnet, testnet, and betanet can be found in
`installer/genesis/`.
-## Contributing (Code, Documentation, Bugs, Etc) ##
+## Contributing
Please refer to our [CONTRIBUTING](CONTRIBUTING.md) document.
-## Project Layout ##
+## Project Layout
+
+`go-algorand` is split into various subsystems containing various packages.
-`go-algorand` is split into various subpackages.
+### Core
-The following packages provide core functionality to the `algod` and `kmd`
-daemons, as well as other tools and commands:
+Provides core functionality to the `algod` and `kmd` daemons, as well as other tools and commands:
- `crypto` contains the cryptographic constructions we're using for hashing,
signatures, and VRFs. There are also some Algorand-specific details here
@@ -147,7 +148,9 @@ daemons, as well as other tools and commands:
- `node` integrates the components above and handles initialization and
shutdown. It provides queries into these components.
-`daemon` defines the two daemons which provide Algorand clients with services:
+### Daemon
+
+Contains the two daemons which provide Algorand clients with services:
- `daemon/algod` holds the `algod` daemon, which implements a participating
node. `algod` allows a node to participate in the agreement protocol,
@@ -158,7 +161,9 @@ daemons, as well as other tools and commands:
daemon allows a node to sign transactions. Because `kmd` is separate from
`algod`, `kmd` allows a user to sign transactions on an air-gapped computer.
-The following packages allow developers to interface with the Algorand system:
+### Interfacing
+
+Allows developers to interface with the Algorand system:
- `cmd` holds the primary commands defining entry points into the system.
- `cmd/catchupsrv` ([README](cmd/catchupsrv/README.md)) is a tool to
@@ -168,8 +173,8 @@ The following packages allow developers to interface with the Algorand system:
- `tools/debug` holds secondary commands which assist developers during debugging.
- `tools/misc` ([README](tools/misc/README.md)) small tools that are sometimes handy in a pinch.
-The following packages contain tools to help Algorand developers deploy networks
-of their own:
+### Deployment
+Helps Algorand developers deploy networks of their own:
- `nodecontrol`
- `docker`
@@ -178,12 +183,14 @@ of their own:
- `components`
- `netdeploy`
-A number of packages provide utilities for the various components:
+### Utilities
+Provides utilities for the various components:
- `logging` is a wrapper around `logrus`.
- `util` contains a variety of utilities, including a codec, a SQLite wrapper,
a goroutine pool, a timer interface, node metrics, and more.
+### Test
`test` ([README](test/README.md)) contains end-to-end tests and utilities for the above components.
diff --git a/agreement/README.md b/agreement/README.md
index 6c627a6f9..a1b80e862 100644
--- a/agreement/README.md
+++ b/agreement/README.md
@@ -40,7 +40,7 @@ parameters:
If `Ledger` and `db.Accessor` provide crash-safe storage, `agreement`
will also recover safely after crashes.
-# Specification
+## Specification
The specification for the protocol implemented by this package is
located [here](https://github.com/algorandfoundation/specs).
@@ -48,7 +48,7 @@ located [here](https://github.com/algorandfoundation/specs).
Optimizations from and other deviations from the spec will be noted
throughout this file.
-## Terminology
+### Terminology
Certain terms in this implementation are used as shorthands for
specific concepts:
@@ -68,7 +68,7 @@ specific concepts:
a quorum of soft votes (i.e., sigma(S, r, p)).
- Additional terminology is described in the [agreement service doc](../docs/agreement_service.md).
-# Design
+## Design
At the top level, an `agreement.Service` encapsulates the parameters
and the goroutines which execute the protocol.
@@ -97,7 +97,7 @@ data structures, such as `vote`, `bundle`, and `proposal`. These data
types have distinct unauthenticated versions, which allows routines to
specify that they accept untrusted input.
-# Concurrent Component
+## Concurrent Component
The `demux` object demultiplexes over a variety of channels which all
represent inputs to the system. Inputs include:
@@ -121,7 +121,7 @@ machine validates these messages in the same way it validates real
network messages and relays them back into the network as
appropriate.
-## Spec Notes: Additional Events
+### Spec Notes: Additional Events
Because signature verification is expected to be a computational
bottleneck in the agreement code, it executes concurrently with
@@ -154,7 +154,7 @@ verification concurrently. Moreover, the implementation abstracts
over multiple keys by generating synthetic network events from the
`pseudonode`.
-# Serialized State Machine
+## Serialized State Machine
The logic of the agreement protocol is implemented as a state machine.
This state machine is composed of many smaller state machines, which
@@ -356,5 +356,5 @@ a given period.
The staging slot for a given period is important because its state is
the precursor to cert and next votes. Once both a soft threshold for a
value and the `Block` corresponding to this value has been observed by
-the node, a `proposalCommittableEvent` is emitted, which indicates
+the node, a proposal `committableEvent` is emitted, which indicates
that the node may cert or next-vote for the proposal.
diff --git a/agreement/abstractions.go b/agreement/abstractions.go
index f5d09dc1e..44aafa4fd 100644
--- a/agreement/abstractions.go
+++ b/agreement/abstractions.go
@@ -133,14 +133,14 @@ type LedgerReader interface {
// protocol may lose liveness.
LookupAgreement(basics.Round, basics.Address) (basics.OnlineAccountData, error)
- // Circulation returns the total amount of money in circulation at the
- // conclusion of a given round.
+ // Circulation returns the total amount of online money in circulation at the
+ // conclusion of a given round rnd that is eligible for voting at voteRnd.
//
// This method returns an error if the given Round has not yet been
// confirmed. It may also return an error if the given Round is
// unavailable by the storage device. In that case, the agreement
// protocol may lose liveness.
- Circulation(basics.Round) (basics.MicroAlgos, error)
+ Circulation(rnd basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error)
// LookupDigest returns the Digest of the entry that was agreed on in a
// given round.
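The `Circulation` signature now takes both the balance round and the round the vote occurs in, letting implementations report only online stake that is still eligible to vote at `voteRnd`. A minimal sketch of a conforming implementation (hypothetical type, not from the repo; the real ones are in data/ledger.go and the test ledgers elsewhere in this diff, and a real ledger would also do the key-expiration accounting this skips):

```go
package sketch

import (
	"fmt"

	"github.com/algorand/go-algorand/data/basics"
)

type memLedger struct {
	online map[basics.Round]basics.MicroAlgos // online stake totals per round
}

// Circulation reports the online stake at the end of rnd that is eligible to
// vote at voteRnd. This sketch ignores eligibility and just returns the
// per-round total.
func (l *memLedger) Circulation(rnd basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) {
	total, ok := l.online[rnd]
	if !ok {
		return basics.MicroAlgos{}, fmt.Errorf("round %d not yet available", rnd)
	}
	_ = voteRnd // a full implementation would exclude stake whose keys expire before voteRnd
	return total, nil
}
```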
diff --git a/agreement/agreementtest/simulate.go b/agreement/agreementtest/simulate.go
index 225d0632c..8d8b713c6 100644
--- a/agreement/agreementtest/simulate.go
+++ b/agreement/agreementtest/simulate.go
@@ -129,7 +129,7 @@ func (b *blackhole) Address() (string, bool) {
// CryptoRandomSource is a random source that is based off our crypto library.
type CryptoRandomSource struct{}
-// Uint64 implements the randomness by calling hte crypto library.
+// Uint64 implements the randomness by calling the crypto library.
func (c *CryptoRandomSource) Uint64() uint64 {
return crypto.RandUint64()
}
diff --git a/agreement/agreementtest/simulate_test.go b/agreement/agreementtest/simulate_test.go
index 6d8b2b43b..051659207 100644
--- a/agreement/agreementtest/simulate_test.go
+++ b/agreement/agreementtest/simulate_test.go
@@ -215,7 +215,7 @@ func (l *testLedger) LookupAgreement(r basics.Round, a basics.Address) (basics.O
return l.state[a].OnlineAccountData(), nil
}
-func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
+func (l *testLedger) Circulation(r basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) {
l.mu.Lock()
defer l.mu.Unlock()
diff --git a/agreement/common_test.go b/agreement/common_test.go
index 361ab2e2a..8f2ad8c4f 100644
--- a/agreement/common_test.go
+++ b/agreement/common_test.go
@@ -335,7 +335,7 @@ func (l *testLedger) LookupAgreement(r basics.Round, a basics.Address) (basics.O
return l.state[a].OnlineAccountData(), nil
}
-func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
+func (l *testLedger) Circulation(r basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) {
l.mu.Lock()
defer l.mu.Unlock()
diff --git a/agreement/demux_test.go b/agreement/demux_test.go
index 692d2cc5f..027dbc9e1 100644
--- a/agreement/demux_test.go
+++ b/agreement/demux_test.go
@@ -495,7 +495,7 @@ func (t *demuxTester) LookupAgreement(basics.Round, basics.Address) (basics.Onli
}
// implement Ledger
-func (t *demuxTester) Circulation(basics.Round) (basics.MicroAlgos, error) {
+func (t *demuxTester) Circulation(basics.Round, basics.Round) (basics.MicroAlgos, error) {
// we don't care about this function in this test.
return basics.MicroAlgos{}, nil
}
diff --git a/agreement/fuzzer/ledger_test.go b/agreement/fuzzer/ledger_test.go
index c0ffb8b53..a62caee4d 100644
--- a/agreement/fuzzer/ledger_test.go
+++ b/agreement/fuzzer/ledger_test.go
@@ -236,7 +236,7 @@ func (l *testLedger) LookupAgreement(r basics.Round, a basics.Address) (basics.O
return l.state[a].OnlineAccountData(), nil
}
-func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
+func (l *testLedger) Circulation(r basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) {
l.mu.Lock()
defer l.mu.Unlock()
diff --git a/agreement/selector.go b/agreement/selector.go
index 505d54188..ba90be3b8 100644
--- a/agreement/selector.go
+++ b/agreement/selector.go
@@ -70,7 +70,7 @@ func membership(l LedgerReader, addr basics.Address, r basics.Round, p period, s
return
}
- total, err := l.Circulation(balanceRound)
+ total, err := l.Circulation(balanceRound, r)
if err != nil {
err = fmt.Errorf("Service.initializeVote (r=%d): Failed to obtain total circulation in round %d: %v", r, balanceRound, err)
return
diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go
index 974b76ef9..720f25790 100644
--- a/catchup/catchpointService.go
+++ b/catchup/catchpointService.go
@@ -22,8 +22,6 @@ import (
"sync"
"time"
- "github.com/algorand/go-algorand/stateproof"
-
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
@@ -33,6 +31,7 @@ import (
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
+ "github.com/algorand/go-algorand/stateproof"
)
const (
@@ -259,6 +258,7 @@ func (cs *CatchpointCatchupService) processStageInactive() (err error) {
if err != nil {
return cs.abort(fmt.Errorf("processStageInactive failed to set a catchpoint label : %v", err))
}
+
err = cs.updateStage(ledger.CatchpointCatchupStateLedgerDownload)
if err != nil {
return cs.abort(fmt.Errorf("processStageInactive failed to update stage : %v", err))
diff --git a/catchup/service.go b/catchup/service.go
index 21c104ea4..ac5fa730d 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -83,7 +83,7 @@ type Service struct {
deadlineTimeout time.Duration
blockValidationPool execpool.BacklogPool
- // suspendForCatchpointWriting defines whether we've ran into a state where the ledger is currently busy writing the
+ // suspendForCatchpointWriting defines whether we've run into a state where the ledger is currently busy writing the
// catchpoint file. If so, we want to suspend the catchup process until the catchpoint file writing is complete,
// and resume from there without stopping the catchup timer.
suspendForCatchpointWriting bool
@@ -157,6 +157,16 @@ func (s *Service) IsSynchronizing() (synchronizing bool, initialSync bool) {
return
}
+// triggerSync attempts to wake up the sync loop.
+func (s *Service) triggerSync() {
+ if syncing, initial := s.IsSynchronizing(); !syncing && !initial {
+ select {
+ case s.syncNow <- struct{}{}:
+ default:
+ }
+ }
+}
+
// SetDisableSyncRound attempts to set the first round we _do_not_ want to fetch from the network
// Blocks from disableSyncRound or any round after disableSyncRound will not be fetched while this is set
func (s *Service) SetDisableSyncRound(rnd uint64) error {
@@ -164,18 +174,14 @@ func (s *Service) SetDisableSyncRound(rnd uint64) error {
return ErrSyncRoundInvalid
}
atomic.StoreUint64(&s.disableSyncRound, rnd)
- if syncing, initial := s.IsSynchronizing(); !syncing && !initial {
- s.syncNow <- struct{}{}
- }
+ s.triggerSync()
return nil
}
// UnsetDisableSyncRound removes any previously set disabled sync round
func (s *Service) UnsetDisableSyncRound() {
atomic.StoreUint64(&s.disableSyncRound, 0)
- if syncing, initial := s.IsSynchronizing(); !syncing && !initial {
- s.syncNow <- struct{}{}
- }
+ s.triggerSync()
}
// GetDisableSyncRound returns the disabled sync round
@@ -233,10 +239,10 @@ func (s *Service) innerFetch(r basics.Round, peer network.Peer) (blk *bookkeepin
// fetchAndWrite fetches a block, checks the cert, and writes it to the ledger. Cert checking and ledger writing both wait for the ledger to advance if necessary.
// Returns false if we should stop trying to catch up. This may occur for several reasons:
-// - If the context is canceled (e.g. if the node is shutting down)
-// - If we couldn't fetch the block (e.g. if there are no peers available or we've reached the catchupRetryLimit)
-// - If the block is already in the ledger (e.g. if agreement service has already written it)
-// - If the retrieval of the previous block was unsuccessful
+// - If the context is canceled (e.g. if the node is shutting down)
+// - If we couldn't fetch the block (e.g. if there are no peers available, or we've reached the catchupRetryLimit)
+// - If the block is already in the ledger (e.g. if agreement service has already written it)
+// - If the retrieval of the previous block was unsuccessful
func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool, peerSelector *peerSelector) bool {
// If sync-ing this round is not intended, don't fetch it
if dontSyncRound := s.GetDisableSyncRound(); dontSyncRound != 0 && r >= basics.Round(dontSyncRound) {
@@ -258,10 +264,10 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool,
loggedMessage := fmt.Sprintf("fetchAndWrite(%d): block retrieval exceeded retry limit", r)
if _, initialSync := s.IsSynchronizing(); initialSync {
// on the initial sync, it's completly expected that we won't be able to get all the "next" blocks.
- // Therefore info should suffice.
+ // Therefore, info should suffice.
s.log.Info(loggedMessage)
} else {
- // On any subsequent sync, we migth be looking for multiple rounds into the future, so it's completly
+ // On any subsequent sync, we might be looking for multiple rounds into the future, so it's completely
// reasonable that we would fail retrieving the future block.
// Generate a warning here only if we're failing to retrieve X+1 or below.
// All other block retrievals should not generate a warning.
@@ -294,7 +300,7 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool,
s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i)
peerSelector.rankPeer(psp, peerRankDownloadFailed)
// we've just failed to retrieve a block; wait until the previous block is fetched before trying again
- // to avoid the usecase where the first block doesn't exists and we're making many requests down the chain
+ // to avoid the use case where the first block doesn't exist, and we're making many requests down the chain
// for no reason.
if !hasLookback {
select {
@@ -479,7 +485,7 @@ func (s *Service) pipelinedFetch(seedLookback uint64) {
go func() {
defer wg.Done()
for t := range taskCh {
- completed <- t() // This write to completed comes after a read from taskCh, so the invariant is preserved.
+ completed <- t() // This write comes after a read from taskCh, so the invariant is preserved.
}
}()
}
@@ -632,10 +638,10 @@ func (s *Service) periodicSync() {
}
// Syncs the client with the network. sync asks the network for the last known block and tries to sync the system
-// up the to the highest number it gets.
+// up to the highest number it gets.
func (s *Service) sync() {
// Only run sync once at a time
- // Store start time of sync - in NS so we can compute time.Duration (which is based on NS)
+ // Store start time of sync - in NS, so we can compute time.Duration (which is based on NS)
start := time.Now()
timeInNS := start.UnixNano()
diff --git a/catchup/service_test.go b/catchup/service_test.go
index 4d1bafb28..217c23bf7 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -796,7 +796,7 @@ func (m *mockedLedger) Block(r basics.Round) (bookkeeping.Block, error) {
func (m *mockedLedger) Lookup(basics.Round, basics.Address) (basics.AccountData, error) {
return basics.AccountData{}, errors.New("not needed for mockedLedger")
}
-func (m *mockedLedger) Circulation(basics.Round) (basics.MicroAlgos, error) {
+func (m *mockedLedger) Circulation(basics.Round, basics.Round) (basics.MicroAlgos, error) {
return basics.MicroAlgos{}, errors.New("not needed for mockedLedger")
}
func (m *mockedLedger) ConsensusVersion(basics.Round) (protocol.ConsensusVersion, error) {
diff --git a/cmd/algocfg/profileCommand.go b/cmd/algocfg/profileCommand.go
index bbddfa26c..6a80e7d64 100644
--- a/cmd/algocfg/profileCommand.go
+++ b/cmd/algocfg/profileCommand.go
@@ -143,7 +143,7 @@ var setProfileCmd = &cobra.Command{
}
file := filepath.Join(dataDir, config.ConfigFilename)
if _, err := os.Stat(file); !forceUpdate && err == nil {
- fmt.Printf("A config.json file already exists for this data directory. Would you like to overwrite it? (Y/n)")
+ fmt.Printf("A config.json file already exists at %s\nWould you like to overwrite it? (Y/n)", file)
reader := bufio.NewReader(os.Stdin)
resp, err := reader.ReadString('\n')
resp = strings.TrimSpace(resp)
@@ -169,5 +169,10 @@ func getConfigForArg(configType string) (config.Local, error) {
if updater, ok := profileNames[configType]; ok {
return updater.updateFunc(cfg), nil
}
- return config.Local{}, fmt.Errorf("invalid profile type %v", configType)
+
+ var names []string
+ for name := range profileNames {
+ names = append(names, name)
+ }
+ return config.Local{}, fmt.Errorf("unknown profile provided: '%s' is not in list of valid profiles: %s", configType, strings.Join(names, ", "))
}
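A side note on the improved message: Go randomizes map iteration order, so the list of valid profiles will be ordered differently on every run. A deterministic variant, sketched here under the assumption that profileNames is the same map and that a "sort" import is added, would sort the names before joining:

    var names []string
    for name := range profileNames {
        names = append(names, name)
    }
    sort.Strings(names) // stable ordering across runs
    return config.Local{}, fmt.Errorf("unknown profile provided: '%s' is not in list of valid profiles: %s", configType, strings.Join(names, ", "))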
diff --git a/cmd/algocfg/profileCommand_test.go b/cmd/algocfg/profileCommand_test.go
new file mode 100644
index 000000000..a99b65f0b
--- /dev/null
+++ b/cmd/algocfg/profileCommand_test.go
@@ -0,0 +1,48 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func Test_getConfigForArg(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ t.Run("invalid config test", func(t *testing.T) {
+ t.Parallel()
+ _, err := getConfigForArg("invalid")
+
+ for name := range profileNames {
+ require.ErrorContains(t, err, name)
+ }
+
+ })
+
+ t.Run("valid config test", func(t *testing.T) {
+ t.Parallel()
+ cfg, err := getConfigForArg("conduit")
+ require.NoError(t, err)
+ require.True(t, cfg.EnableFollowMode)
+ })
+
+}
diff --git a/cmd/algod/main.go b/cmd/algod/main.go
index cb0960e61..8ff24a970 100644
--- a/cmd/algod/main.go
+++ b/cmd/algod/main.go
@@ -284,10 +284,15 @@ func run() int {
if err != nil {
log.Errorf("cannot locate node executable: %s", err)
} else {
- phonebookDir := filepath.Dir(ex)
- phonebookAddresses, err = config.LoadPhonebook(phonebookDir)
- if err != nil {
- log.Debugf("Cannot load static phonebook: %v", err)
+ phonebookDirs := []string{filepath.Dir(ex), dataDir}
+ for _, phonebookDir := range phonebookDirs {
+ phonebookAddresses, err = config.LoadPhonebook(phonebookDir)
+ if err == nil {
+ log.Debugf("Static phonebook loaded from %s", phonebookDir)
+ break
+ } else {
+ log.Debugf("Cannot load static phonebook from %s dir: %v", phonebookDir, err)
+ }
}
}
}
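Net effect of the loop above: the phonebook next to the executable still takes precedence, and a static phonebook placed in the data directory now acts as a fallback when the first lookup fails.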
diff --git a/cmd/catchpointdump/database.go b/cmd/catchpointdump/database.go
index a45b300e5..7706bcd3d 100644
--- a/cmd/catchpointdump/database.go
+++ b/cmd/catchpointdump/database.go
@@ -17,6 +17,7 @@
package main
import (
+ "bufio"
"context"
"database/sql"
"fmt"
@@ -63,6 +64,13 @@ var databaseCmd = &cobra.Command{
}
defer outFile.Close()
}
+
+ var version uint64
+ version, err = getVersion(ledgerTrackerFilename, ledgerTrackerStaging)
+ if err != nil {
+ reportErrorf("Unable to read version : %v", err)
+ }
+ printDbVersion(ledgerTrackerStaging, version, outFile)
err = printAccountsDatabase(ledgerTrackerFilename, ledgerTrackerStaging, ledger.CatchpointFileHeader{}, outFile, nil)
if err != nil {
reportErrorf("Unable to print account database : %v", err)
@@ -71,9 +79,56 @@ var databaseCmd = &cobra.Command{
if err != nil {
reportErrorf("Unable to print key value store : %v", err)
}
+ // state proof verification data is present in tracker db version >= 10 or
+ // catchpoint file version >= 7 (i.e. staging tables)
+ if (!ledgerTrackerStaging && version < 10) || (ledgerTrackerStaging && version < ledger.CatchpointFileVersionV7) {
+ return
+ }
+ err = printStateProofVerificationContext(ledgerTrackerFilename, ledgerTrackerStaging, outFile)
+ if err != nil {
+ reportErrorf("Unable to print state proof verification database : %v", err)
+ }
},
}
+func printDbVersion(staging bool, version uint64, outFile *os.File) {
+ fileWriter := bufio.NewWriterSize(outFile, 1024*1024)
+ defer fileWriter.Flush()
+
+ if staging {
+ fmt.Fprintf(fileWriter, "Catchpoint version: %d\n", version)
+ } else {
+ fmt.Fprintf(fileWriter, "Ledger db version: %d\n", version)
+ }
+}
+
+func getVersion(filename string, staging bool) (uint64, error) {
+ dbAccessor, err := db.MakeAccessor(filename, true, false)
+ if err != nil || dbAccessor.Handle == nil {
+ return 0, err
+ }
+ defer dbAccessor.Close()
+ var version uint64
+ err = dbAccessor.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ if staging {
+ // writing the version of the catchpoint file starts only on ver >= CatchpointFileVersionV7.
+ // in case the catchpoint version does not exist, ReadCatchpointStateUint64 returns 0
+ cw := sqlitedriver.NewCatchpointSQLReaderWriter(tx)
+ version, err = cw.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupVersion)
+ return err
+ }
+
+ versionAsInt32, err := db.GetUserVersion(ctx, tx)
+ version = uint64(versionAsInt32)
+ return err
+ })
+ if err != nil {
+ return 0, err
+ }
+
+ return version, nil
+}
+
var checkCmd = &cobra.Command{
Use: "check",
Short: "Performs a consistency checking on the accounts merkle trie",
diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go
index 50ae82fe9..e99a0b406 100644
--- a/cmd/catchpointdump/file.go
+++ b/cmd/catchpointdump/file.go
@@ -47,7 +47,7 @@ import (
var catchpointFile string
var outFileName string
-var excludedFields *cmdutil.CobraStringSliceValue = cmdutil.MakeCobraStringSliceValue(nil, []string{"version", "catchpoint"})
+var excludedFields = cmdutil.MakeCobraStringSliceValue(nil, []string{"version", "catchpoint"})
func init() {
fileCmd.Flags().StringVarP(&catchpointFile, "tar", "t", "", "Specify the catchpoint file (either .tar or .tar.gz) to process")
@@ -127,7 +127,6 @@ var fileCmd = &cobra.Command{
}
defer outFile.Close()
}
-
err = printAccountsDatabase("./ledger.tracker.sqlite", true, fileHeader, outFile, excludedFields.GetSlice())
if err != nil {
reportErrorf("Unable to print account database : %v", err)
@@ -136,6 +135,10 @@ var fileCmd = &cobra.Command{
if err != nil {
reportErrorf("Unable to print key value store : %v", err)
}
+ err = printStateProofVerificationContext("./ledger.tracker.sqlite", true, outFile)
+ if err != nil {
+ reportErrorf("Unable to print state proof verification database : %v", err)
+ }
}
},
}
@@ -167,31 +170,36 @@ func isGzipCompressed(catchpointReader *bufio.Reader, catchpointFileSize int64)
return prefixBytes[0] == gzipPrefix[0] && prefixBytes[1] == gzipPrefix[1]
}
-func getCatchpointTarReader(catchpointReader *bufio.Reader, catchpointFileSize int64) (*tar.Reader, error) {
+func getCatchpointTarReader(catchpointReader *bufio.Reader, catchpointFileSize int64) (*tar.Reader, bool, error) {
if isGzipCompressed(catchpointReader, catchpointFileSize) {
gzipReader, err := gzip.NewReader(catchpointReader)
if err != nil {
- return nil, err
+ return nil, false, err
}
-
- return tar.NewReader(gzipReader), nil
+ return tar.NewReader(gzipReader), true, nil
}
- return tar.NewReader(catchpointReader), nil
+ return tar.NewReader(catchpointReader), false, nil
}
func loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.CatchpointCatchupAccessor, catchpointFile io.Reader, catchpointFileSize int64) (fileHeader ledger.CatchpointFileHeader, err error) {
fmt.Printf("\n")
- printLoadCatchpointProgressLine(0, 50, 0)
+ const barLength = 50
+ printLoadCatchpointProgressLine(0, barLength, 0)
lastProgressUpdate := time.Now()
progress := uint64(0)
defer printLoadCatchpointProgressLine(0, 0, 0)
catchpointReader := bufio.NewReader(catchpointFile)
- tarReader, err := getCatchpointTarReader(catchpointReader, catchpointFileSize)
+ tarReader, isCompressed, err := getCatchpointTarReader(catchpointReader, catchpointFileSize)
if err != nil {
return fileHeader, err
}
+ if isCompressed {
+ // a gzip'ed catchpoint is typically 3-6 times smaller than the raw tar, so scale
+ // catchpointFileSize by 4 to keep the progress bar roughly reflecting the real state
+ // (e.g. a 1 GiB .tar.gz is treated as ~4 GiB of tar data)
+ catchpointFileSize = 4 * catchpointFileSize
+ }
var downloadProgress ledger.CatchpointCatchupAccessorProgress
for {
@@ -223,13 +231,17 @@ func loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.Catc
if err != nil {
return fileHeader, err
}
- if header.Name == "content.msgpack" {
+ if header.Name == ledger.CatchpointContentFileName {
// we already know it's valid, since we validated that above.
protocol.Decode(balancesBlockBytes, &fileHeader)
}
if time.Since(lastProgressUpdate) > 50*time.Millisecond && catchpointFileSize > 0 {
lastProgressUpdate = time.Now()
- printLoadCatchpointProgressLine(int(float64(progress)*50.0/float64(catchpointFileSize)), 50, int64(progress))
+ progressRatio := int(float64(progress) * barLength / float64(catchpointFileSize))
+ if progressRatio > barLength {
+ progressRatio = barLength
+ }
+ printLoadCatchpointProgressLine(progressRatio, barLength, int64(progress))
}
}
}
@@ -434,6 +446,42 @@ func printAccountsDatabase(databaseName string, stagingTables bool, fileHeader l
})
}
+func printStateProofVerificationContext(databaseName string, stagingTables bool, outFile *os.File) error {
+ fileWriter := bufio.NewWriterSize(outFile, 1024*1024)
+ defer fileWriter.Flush()
+
+ dbAccessor, err := db.MakeAccessor(databaseName, true, false)
+ if err != nil || dbAccessor.Handle == nil {
+ return err
+ }
+ defer dbAccessor.Close()
+
+ var stateProofVerificationContext []ledgercore.StateProofVerificationContext
+ err = dbAccessor.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ if stagingTables {
+ stateProofVerificationContext, err = sqlitedriver.MakeStateProofVerificationReader(tx).GetAllSPContextsFromCatchpointTbl(ctx)
+ } else {
+ stateProofVerificationContext, err = sqlitedriver.MakeStateProofVerificationReader(tx).GetAllSPContexts(ctx)
+ }
+ return err
+ })
+
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(fileWriter, "State Proof Verification Data:\n")
+ for _, ctx := range stateProofVerificationContext {
+ jsonData, err := json.Marshal(ctx)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(fileWriter, "%d : %s\n", ctx.LastAttestedRound, string(jsonData))
+ }
+
+ return nil
+}
+
func printKeyValue(writer *bufio.Writer, key, value []byte) {
var pretty string
ai, rest, err := apps.SplitBoxKey(string(key))
diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go
index d30cfc297..e0fdb7f89 100644
--- a/cmd/catchpointdump/net.go
+++ b/cmd/catchpointdump/net.go
@@ -361,6 +361,10 @@ func loadAndDump(addr string, tarFile string, genesisInitState ledgercore.InitSt
if err != nil {
return err
}
+ err = printStateProofVerificationContext("./ledger.tracker.sqlite", true, outFile)
+ if err != nil {
+ return err
+ }
}
return nil
diff --git a/cmd/goal/README.md b/cmd/goal/README.md
index 2ecebf65b..f33e1ce41 100644
--- a/cmd/goal/README.md
+++ b/cmd/goal/README.md
@@ -68,7 +68,7 @@ TEALDIR=cmd/goal/examples
echo $TEALDIR
# create the app and TAKE NOTE of its "app index"
-goal app create --creator ${ACCOUNT} --approval-prog ${TEALDIR}/boxes.teal --clear-prog ${TEALDIR}/clear.teal --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0
+goal app create --creator ${ACCOUNT} --approval-prog ${TEALDIR}/boxes.teal --clear-prog ${TEALDIR}/clear.teal
```
For the following questions, you'll need to use the app index. That will be shown in the last line printed. EG:
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index 092079330..cb048ce50 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -163,10 +163,6 @@ func init() {
readStateAppCmd.Flags().BoolVar(&guessFormat, "guess-format", false, "Format application state using heuristics to guess data encoding.")
createAppCmd.MarkFlagRequired("creator")
- createAppCmd.MarkFlagRequired("global-ints")
- createAppCmd.MarkFlagRequired("global-byteslices")
- createAppCmd.MarkFlagRequired("local-ints")
- createAppCmd.MarkFlagRequired("local-byteslices")
optInAppCmd.MarkFlagRequired("app-id")
optInAppCmd.MarkFlagRequired("from")
@@ -1063,7 +1059,7 @@ var infoAppCmd = &cobra.Command{
}
// populateMethodCallTxnArgs parses and loads transactions from the files indicated by the values
-// slice. An error will occur if the transaction does not matched the expected type, it has a nonzero
+// slice. An error will occur if the transaction does not match the expected type, it has a nonzero
// group ID, or if it is signed by a normal signature or Msig signature (but not Lsig signature)
func populateMethodCallTxnArgs(types []string, values []string) ([]transactions.SignedTxn, error) {
loadedTxns := make([]transactions.SignedTxn, len(values))
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index 214782894..e27d5e031 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -30,12 +30,14 @@ import (
"github.com/algorand/go-algorand/cmd/util/datadir"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
+ "github.com/algorand/go-algorand/ledger/simulation"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
@@ -43,27 +45,34 @@ import (
)
var (
- toAddress string
- account string
- amount uint64
- txFilename string
- rejectsFilename string
- closeToAddress string
- noProgramOutput bool
- writeSourceMap bool
- signProgram bool
- programSource string
- argB64Strings []string
- disassemble bool
- verbose bool
- progByteFile string
- msigParams string
- logicSigFile string
- timeStamp int64
- protoVersion string
- rekeyToAddress string
- signerAddress string
- rawOutput bool
+ toAddress string
+ account string
+ amount uint64
+ txFilename string
+ rejectsFilename string
+ closeToAddress string
+ noProgramOutput bool
+ writeSourceMap bool
+ signProgram bool
+ programSource string
+ argB64Strings []string
+ disassemble bool
+ verbose bool
+ progByteFile string
+ msigParams string
+ logicSigFile string
+ timeStamp int64
+ protoVersion string
+ rekeyToAddress string
+ signerAddress string
+ rawOutput bool
+ requestFilename string
+ requestOutFilename string
+
+ simulateAllowEmptySignatures bool
+ simulateAllowMoreLogging bool
+ simulateAllowMoreOpcodeBudget bool
+ simulateExtraOpcodeBudget uint64
)
func init() {
@@ -76,6 +85,7 @@ func init() {
clerkCmd.AddCommand(compileCmd)
clerkCmd.AddCommand(dryrunCmd)
clerkCmd.AddCommand(dryrunRemoteCmd)
+ clerkCmd.AddCommand(simulateCmd)
// Wallet to be used for the clerk operation
clerkCmd.PersistentFlags().StringVarP(&walletName, "wallet", "w", "", "Set the wallet to be used for the selected operation")
@@ -88,7 +98,7 @@ func init() {
sendCmd.Flags().StringVar(&rekeyToAddress, "rekey-to", "", "Rekey account to the given spending key/address. (Future transactions from this account will need to be signed with the new key.)")
sendCmd.Flags().StringVarP(&programSource, "from-program", "F", "", "Program source to use as account logic")
sendCmd.Flags().StringVarP(&progByteFile, "from-program-bytes", "P", "", "Program binary to use as account logic")
- sendCmd.Flags().StringSliceVar(&argB64Strings, "argb64", nil, "base64 encoded args to pass to transaction logic")
+ sendCmd.Flags().StringSliceVar(&argB64Strings, "argb64", nil, "Base64 encoded args to pass to transaction logic")
sendCmd.Flags().StringVarP(&logicSigFile, "logic-sig", "L", "", "LogicSig to apply to transaction")
sendCmd.Flags().StringVar(&msigParams, "msig-params", "", "Multisig preimage parameters - [threshold] [Address 1] [Address 2] ...\nUsed to add the necessary fields in case the account was rekeyed to a multisig account")
sendCmd.MarkFlagRequired("to")
@@ -108,8 +118,8 @@ func init() {
signCmd.Flags().StringVarP(&signerAddress, "signer", "S", "", "Address of key to sign with, if different from transaction \"from\" address due to rekeying")
signCmd.Flags().StringVarP(&programSource, "program", "p", "", "Program source to use as account logic")
signCmd.Flags().StringVarP(&logicSigFile, "logic-sig", "L", "", "LogicSig to apply to transaction")
- signCmd.Flags().StringSliceVar(&argB64Strings, "argb64", nil, "base64 encoded args to pass to transaction logic")
- signCmd.Flags().StringVarP(&protoVersion, "proto", "P", "", "consensus protocol version id string")
+ signCmd.Flags().StringSliceVar(&argB64Strings, "argb64", nil, "Base64 encoded args to pass to transaction logic")
+ signCmd.Flags().StringVarP(&protoVersion, "proto", "P", "", "Consensus protocol version id string")
signCmd.MarkFlagRequired("infile")
signCmd.MarkFlagRequired("outfile")
@@ -123,26 +133,34 @@ func init() {
splitCmd.MarkFlagRequired("infile")
splitCmd.MarkFlagRequired("outfile")
- compileCmd.Flags().BoolVarP(&disassemble, "disassemble", "D", false, "disassemble a compiled program")
- compileCmd.Flags().BoolVarP(&noProgramOutput, "no-out", "n", false, "don't write contract program binary")
- compileCmd.Flags().BoolVarP(&writeSourceMap, "map", "m", false, "write out source map")
- compileCmd.Flags().BoolVarP(&signProgram, "sign", "s", false, "sign program, output is a binary signed LogicSig record")
+ compileCmd.Flags().BoolVarP(&disassemble, "disassemble", "D", false, "Disassemble a compiled program")
+ compileCmd.Flags().BoolVarP(&noProgramOutput, "no-out", "n", false, "Don't write contract program binary")
+ compileCmd.Flags().BoolVarP(&writeSourceMap, "map", "m", false, "Write out source map")
+ compileCmd.Flags().BoolVarP(&signProgram, "sign", "s", false, "Sign program, output is a binary signed LogicSig record")
compileCmd.Flags().StringVarP(&outFilename, "outfile", "o", "", "Filename to write program bytes or signed LogicSig to")
compileCmd.Flags().StringVarP(&account, "account", "a", "", "Account address to sign the program (If not specified, uses default account)")
- dryrunCmd.Flags().StringVarP(&txFilename, "txfile", "t", "", "transaction or transaction-group to test")
- dryrunCmd.Flags().StringVarP(&protoVersion, "proto", "P", "", "consensus protocol version id string")
+ dryrunCmd.Flags().StringVarP(&txFilename, "txfile", "t", "", "Transaction or transaction-group to test")
+ dryrunCmd.Flags().StringVarP(&protoVersion, "proto", "P", "", "Consensus protocol version id string")
dryrunCmd.Flags().BoolVar(&dumpForDryrun, "dryrun-dump", false, "Dump in dryrun format acceptable by dryrun REST api instead of running")
dryrunCmd.Flags().Var(&dumpForDryrunFormat, "dryrun-dump-format", "Dryrun dump format: "+dumpForDryrunFormat.AllowedString())
- dryrunCmd.Flags().StringSliceVar(&dumpForDryrunAccts, "dryrun-accounts", nil, "additional accounts to include into dryrun request obj")
+ dryrunCmd.Flags().StringSliceVar(&dumpForDryrunAccts, "dryrun-accounts", nil, "Additional accounts to include into dryrun request obj")
dryrunCmd.Flags().StringVarP(&outFilename, "outfile", "o", "", "Filename for writing dryrun state object")
dryrunCmd.MarkFlagRequired("txfile")
- dryrunRemoteCmd.Flags().StringVarP(&txFilename, "dryrun-state", "D", "", "dryrun request object to run")
- dryrunRemoteCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "print more info")
- dryrunRemoteCmd.Flags().BoolVarP(&rawOutput, "raw", "r", false, "output raw response from algod")
+ dryrunRemoteCmd.Flags().StringVarP(&txFilename, "dryrun-state", "D", "", "Dryrun request object to run")
+ dryrunRemoteCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Print more info")
+ dryrunRemoteCmd.Flags().BoolVarP(&rawOutput, "raw", "r", false, "Output raw response from algod")
dryrunRemoteCmd.MarkFlagRequired("dryrun-state")
+ simulateCmd.Flags().StringVarP(&txFilename, "txfile", "t", "", "Transaction or transaction-group to test. Mutually exclusive with --request")
+ simulateCmd.Flags().StringVar(&requestFilename, "request", "", "Simulate request object to run. Mutually exclusive with --txfile")
+ simulateCmd.Flags().StringVar(&requestOutFilename, "request-only-out", "", "Filename for writing simulate request object. If provided, the command will only write the request object and exit. No simulation will happen")
+ simulateCmd.Flags().StringVarP(&outFilename, "result-out", "o", "", "Filename for writing simulation result")
+ simulateCmd.Flags().BoolVar(&simulateAllowEmptySignatures, "allow-empty-signatures", false, "Allow transactions without signatures to be simulated as if they had correct signatures")
+ simulateCmd.Flags().BoolVar(&simulateAllowMoreLogging, "allow-more-logging", false, "Lift the limits on log opcode during simulation")
+ simulateCmd.Flags().BoolVar(&simulateAllowMoreOpcodeBudget, "allow-more-opcode-budget", false, "Apply max extra opcode budget for apps per transaction group (default 320000) during simulation")
+ simulateCmd.Flags().Uint64Var(&simulateExtraOpcodeBudget, "extra-opcode-budget", 0, "Apply extra opcode budget for apps per transaction group during simulation")
}
var clerkCmd = &cobra.Command{
@@ -914,32 +932,12 @@ var splitCmd = &cobra.Command{
Long: `Split a file containing many transactions. The input file must contain one or more transactions. These transactions will be written to individual files.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
- data, err := readFile(txFilename)
- if err != nil {
- reportErrorf(fileReadError, txFilename, err)
- }
-
- dec := protocol.NewMsgpDecoderBytes(data)
-
- var txns []transactions.SignedTxn
- for {
- var txn transactions.SignedTxn
- err = dec.Decode(&txn)
- if err == io.EOF {
- break
- }
- if err != nil {
- reportErrorf(txDecodeError, txFilename, err)
- }
-
- txns = append(txns, txn)
- }
-
+ txns := decodeTxnsFromFile(txFilename)
outExt := filepath.Ext(outFilename)
outBase := outFilename[:len(outFilename)-len(outExt)]
- for idx, txn := range txns {
+ for idx := range txns {
fn := fmt.Sprintf("%s-%d%s", outBase, idx, outExt)
- err = writeFile(fn, protocol.Encode(&txn), 0600)
+ err := writeFile(fn, protocol.Encode(&txns[idx]), 0600)
if err != nil {
reportErrorf(fileWriteError, outFilename, err)
}
@@ -1115,23 +1113,7 @@ var dryrunCmd = &cobra.Command{
Short: "Test a program offline",
Long: "Test a TEAL program offline under various conditions and verbosity.",
Run: func(cmd *cobra.Command, args []string) {
- data, err := readFile(txFilename)
- if err != nil {
- reportErrorf(fileReadError, txFilename, err)
- }
- dec := protocol.NewMsgpDecoderBytes(data)
- stxns := make([]transactions.SignedTxn, 0, 10)
- for {
- var txn transactions.SignedTxn
- err = dec.Decode(&txn)
- if err == io.EOF {
- break
- }
- if err != nil {
- reportErrorf(txDecodeError, txFilename, err)
- }
- stxns = append(stxns, txn)
- }
+ stxns := decodeTxnsFromFile(txFilename)
txgroup := transactions.WrapSignedTxnsWithAD(stxns)
proto, params := getProto(protoVersion)
if dumpForDryrun {
@@ -1253,6 +1235,96 @@ var dryrunRemoteCmd = &cobra.Command{
},
}
+var simulateCmd = &cobra.Command{
+ Use: "simulate",
+ Short: "Simulate a transaction or transaction group with algod's simulate REST endpoint",
+ Long: `Simulate a transaction or transaction group with algod's simulate REST endpoint under various configurations.`,
+ Run: func(cmd *cobra.Command, args []string) {
+ txProvided := cmd.Flags().Changed("txfile")
+ requestProvided := cmd.Flags().Changed("request")
+ if txProvided == requestProvided {
+ reportErrorf("exactly one of --txfile or --request must be provided")
+ }
+
+ extraBudgetProvided := cmd.Flags().Changed("extra-opcode-budget")
+ if simulateAllowMoreOpcodeBudget && extraBudgetProvided {
+ reportErrorf("--allow-more-opcode-budget and --extra-opcode-budget are mutually exclusive")
+ }
+ if simulateAllowMoreOpcodeBudget {
+ simulateExtraOpcodeBudget = simulation.MaxExtraOpcodeBudget
+ }
+
+ requestOutProvided := cmd.Flags().Changed("request-only-out")
+ resultOutProvided := cmd.Flags().Changed("result-out")
+ if requestOutProvided && resultOutProvided {
+ reportErrorf("--request-only-out and --result-out are mutually exclusive")
+ }
+
+ if requestOutProvided {
+ // If request-only-out is provided, only create a request and write it. Do not actually
+ // simulate.
+ if requestProvided {
+ reportErrorf("--request-only-out and --request are mutually exclusive")
+ }
+ txgroup := decodeTxnsFromFile(txFilename)
+ simulateRequest := v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {
+ Txns: txgroup,
+ },
+ },
+ AllowEmptySignatures: simulateAllowEmptySignatures,
+ AllowMoreLogging: simulateAllowMoreLogging,
+ ExtraOpcodeBudget: simulateExtraOpcodeBudget,
+ }
+ err := writeFile(requestOutFilename, protocol.EncodeJSON(simulateRequest), 0600)
+ if err != nil {
+ reportErrorf("write file error: %s", err.Error())
+ }
+ return
+ }
+
+ dataDir := datadir.EnsureSingleDataDir()
+ client := ensureFullClient(dataDir)
+ var simulateResponse v2.PreEncodedSimulateResponse
+ var responseErr error
+ if txProvided {
+ txgroup := decodeTxnsFromFile(txFilename)
+ simulateRequest := v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {
+ Txns: txgroup,
+ },
+ },
+ AllowEmptySignatures: simulateAllowEmptySignatures,
+ AllowMoreLogging: simulateAllowMoreLogging,
+ ExtraOpcodeBudget: simulateExtraOpcodeBudget,
+ }
+ simulateResponse, responseErr = client.SimulateTransactions(simulateRequest)
+ } else {
+ data, err := readFile(requestFilename)
+ if err != nil {
+ reportErrorf(fileReadError, requestFilename, err)
+ }
+ simulateResponse, responseErr = client.SimulateTransactionsRaw(data)
+ }
+
+ if responseErr != nil {
+ reportErrorf("simulation error: %s", responseErr.Error())
+ }
+
+ encodedResponse := protocol.EncodeJSON(&simulateResponse)
+ if outFilename != "" {
+ err := writeFile(outFilename, encodedResponse, 0600)
+ if err != nil {
+ reportErrorf("write file error: %s", err.Error())
+ }
+ } else {
+ fmt.Println(string(encodedResponse))
+ }
+ },
+}
+
// unmarshalSlice converts string addresses to basics.Address
func unmarshalSlice(accts []string) ([]basics.Address, error) {
result := make([]basics.Address, 0, len(accts))
@@ -1265,3 +1337,24 @@ func unmarshalSlice(accts []string) ([]basics.Address, error) {
}
return result, nil
}
+
+func decodeTxnsFromFile(file string) []transactions.SignedTxn {
+ data, err := readFile(file)
+ if err != nil {
+ reportErrorf(fileReadError, file, err)
+ }
+ var txgroup []transactions.SignedTxn
+ dec := protocol.NewMsgpDecoderBytes(data)
+ for {
+ var txn transactions.SignedTxn
+ err = dec.Decode(&txn)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ reportErrorf(txDecodeError, file, err)
+ }
+ txgroup = append(txgroup, txn)
+ }
+ return txgroup
+}
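The request construction above is duplicated between the --request-only-out path and the live path; factored out, it reduces to roughly the sketch below. This uses only identifiers visible in this diff and assumes it sits in the same package as simulateCmd (with ensureFullClient returning a libgoal.Client), so treat it as illustrative rather than the command's actual structure.

    // runSimulateSketch packages decoded transactions as a single-group
    // simulate request and submits it, mirroring the --txfile branch.
    func runSimulateSketch(client libgoal.Client, txns []transactions.SignedTxn) (v2.PreEncodedSimulateResponse, error) {
        request := v2.PreEncodedSimulateRequest{
            TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
                {Txns: txns},
            },
            AllowEmptySignatures: simulateAllowEmptySignatures,
            AllowMoreLogging:     simulateAllowMoreLogging,
            ExtraOpcodeBudget:    simulateExtraOpcodeBudget,
        }
        return client.SimulateTransactions(request)
    }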
diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go
index 6bf976f6a..bcb7b3fd5 100644
--- a/cmd/goal/messages.go
+++ b/cmd/goal/messages.go
@@ -62,7 +62,7 @@ const (
infoNodeShuttingDown = "Algorand node is shutting down..."
infoNodeSuccessfullyStopped = "The node was successfully stopped."
infoNodeStatus = "Last committed block: %d\nTime since last block: %s\nSync Time: %s\nLast consensus protocol: %s\nNext consensus protocol: %s\nRound for next consensus protocol: %d\nNext consensus protocol supported: %v"
- infoNodeStatusConsensusUpgradeVoting = "Consensus upgrate state: Voting\nYes votes: %d\nNo votes: %d\nVotes remaining: %d\nYes votes required: %d\nVote window close round: %d"
+ infoNodeStatusConsensusUpgradeVoting = "Consensus upgrade state: Voting\nYes votes: %d\nNo votes: %d\nVotes remaining: %d\nYes votes required: %d\nVote window close round: %d"
infoNodeStatusConsensusUpgradeScheduled = "Consensus upgrade state: Scheduled"
catchupStoppedOnUnsupported = "Last supported block (%d) is committed. The next block consensus protocol is not supported. Catchup service is stopped."
infoNodeCatchpointCatchupStatus = "Last committed block: %d\nSync Time: %s\nCatchpoint: %s"
diff --git a/cmd/goal/node.go b/cmd/goal/node.go
index b8b5f2efa..2b70c2532 100644
--- a/cmd/goal/node.go
+++ b/cmd/goal/node.go
@@ -482,6 +482,7 @@ func makeStatusString(stat model.NodeStatusResponse) string {
upgradeVotesRequired := uint64(0)
upgradeNoVotes := uint64(0)
upgradeYesVotes := uint64(0)
+ upgradeVoteRounds := uint64(0)
if stat.UpgradeVotesRequired != nil {
upgradeVotesRequired = *stat.UpgradeVotesRequired
}
@@ -491,11 +492,14 @@ func makeStatusString(stat model.NodeStatusResponse) string {
if stat.UpgradeYesVotes != nil {
upgradeYesVotes = *stat.UpgradeYesVotes
}
+ if stat.UpgradeVoteRounds != nil {
+ upgradeVoteRounds = *stat.UpgradeVoteRounds
+ }
statusString = statusString + "\n" + fmt.Sprintf(
infoNodeStatusConsensusUpgradeVoting,
upgradeYesVotes,
upgradeNoVotes,
- upgradeNextProtocolVoteBefore-stat.LastRound,
+ upgradeVoteRounds-upgradeYesVotes-upgradeNoVotes,
upgradeVotesRequired,
upgradeNextProtocolVoteBefore,
)
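Worked example of the corrected line: with UpgradeVoteRounds reporting a 10000-round voting window and 4000 yes plus 1000 no votes tallied, "Votes remaining" now prints 5000, derived from the vote counts themselves rather than from round arithmetic.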
diff --git a/cmd/loadgenerator/main.go b/cmd/loadgenerator/main.go
index 6ef8cd238..594f89380 100644
--- a/cmd/loadgenerator/main.go
+++ b/cmd/loadgenerator/main.go
@@ -69,14 +69,14 @@ func findRootKeys(algodDir string) []*crypto.SignatureSecrets {
var handle db.Accessor
handle, err := db.MakeErasableAccessor(path)
if err != nil {
- return nil // don't care, move on
+ return nil //nolint:nilerr // don't care, move on
}
defer handle.Close()
// Fetch an account.Participation from the database
root, err := algodAcct.RestoreRoot(handle)
if err != nil {
- return nil // don't care, move on
+ return nil //nolint:nilerr // don't care, move on
}
keylist = append(keylist, root.Secrets())
return nil
diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go
index a7f189401..b5e039a2b 100644
--- a/cmd/opdoc/opdoc.go
+++ b/cmd/opdoc/opdoc.go
@@ -21,6 +21,7 @@ import (
"fmt"
"io"
"os"
+ "sort"
"strings"
"github.com/algorand/go-algorand/config"
@@ -28,7 +29,39 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-var docVersion = 8
+var docVersion = 9
+
+// opImmediateNoteSyntaxMarkdown returns a short string describing the immediate data that follows the op byte
+func opImmediateNoteSyntaxMarkdown(name string, oids []logic.OpImmediateDetails) string {
+ if len(oids) == 0 {
+ return ""
+ }
+
+ argNames := make([]string, len(oids))
+ argDocs := make([]string, len(oids))
+ for idx, oid := range oids {
+ argNote := oid.Comment
+ if oid.Reference != "" {
+ argNote = fmt.Sprintf("[%s](#field-group-%s)", oid.Reference, strings.ToLower(oid.Reference))
+ }
+ argNames[idx] = oid.Name
+ argDocs[idx] = fmt.Sprintf("%s: %s", oid.Name, argNote)
+ }
+
+ return fmt.Sprintf("`%s %s` ∋ %s", name, strings.Join(argNames, " "), strings.Join(argDocs, ", "))
+}
+
+func opImmediateNoteEncoding(opcode byte, oids []logic.OpImmediateDetails) string {
+ if len(oids) == 0 {
+ return fmt.Sprintf("0x%02x", opcode)
+ }
+
+ notes := make([]string, len(oids))
+ for idx, oid := range oids {
+ notes[idx] = oid.Encoding
+ }
+ return fmt.Sprintf("0x%02x {%s}", opcode, strings.Join(notes, "}, {"))
+}
func opGroupMarkdownTable(names []string, out io.Writer) {
fmt.Fprint(out, `| Opcode | Description |
@@ -50,6 +83,17 @@ func markdownTableEscape(x string) string {
return strings.ReplaceAll(x, "|", "\\|")
}
+func namedStackTypesMarkdown(out io.Writer, stackTypes []namedType) {
+ fmt.Fprintf(out, "#### Definitions\n\n")
+ fmt.Fprintf(out, "| Name | Bound | AVM Type |\n")
+ fmt.Fprintf(out, "| ---- | ---- | -------- |\n")
+
+ for _, st := range stackTypes {
+ fmt.Fprintf(out, "| %s | %s | %s |\n", st.Name, st.boundString(), st.AVMType)
+ }
+ fmt.Fprintf(out, "\n")
+}
+
func integerConstantsTableMarkdown(out io.Writer) {
fmt.Fprintf(out, "#### OnComplete\n\n")
fmt.Fprintf(out, "%s\n\n", logic.OnCompletionPreamble)
@@ -169,14 +213,22 @@ func stackMarkdown(op *logic.OpSpec) string {
}
func opToMarkdown(out io.Writer, op *logic.OpSpec, groupDocWritten map[string]bool) (err error) {
- ws := ""
- opextra := logic.OpImmediateNote(op.Name)
- if opextra != "" {
- ws = " "
+
+ deets := logic.OpImmediateDetailsFromSpec(*op)
+
+ // Only need syntax line if there are immediates
+ // so it carries its own newline
+ syntax := ""
+ if opSyntax := opImmediateNoteSyntaxMarkdown(op.Name, deets); opSyntax != "" {
+ syntax = fmt.Sprintf("- Syntax: %s\n", opSyntax)
}
+
+ encoding := fmt.Sprintf("- Bytecode: %s", opImmediateNoteEncoding(op.Opcode, deets))
+
stackEffects := stackMarkdown(op)
- fmt.Fprintf(out, "\n## %s%s\n\n- Opcode: 0x%02x%s%s\n%s",
- op.Name, immediateMarkdown(op), op.Opcode, ws, opextra, stackEffects)
+
+ fmt.Fprintf(out, "\n## %s\n\n%s%s\n%s", op.Name, syntax, encoding, stackEffects)
+
fmt.Fprintf(out, "- %s\n", logic.OpDoc(op.Name))
// if cost changed with versions print all of them
costs := logic.OpAllCosts(op.Name)
@@ -209,7 +261,7 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec, groupDocWritten map[string]bo
for i := range op.OpDetails.Immediates {
group := op.OpDetails.Immediates[i].Group
if group != nil && group.Doc != "" && !groupDocWritten[group.Name] {
- fmt.Fprintf(out, "\n`%s` %s:\n\n", group.Name, group.Doc)
+ fmt.Fprintf(out, "\n### %s\n\n%s\n\n", group.Name, group.Doc)
fieldGroupMarkdown(out, group)
groupDocWritten[group.Name] = true
}
@@ -238,56 +290,86 @@ func opsToMarkdown(out io.Writer) (err error) {
type OpRecord struct {
Opcode byte
Name string
- Args string `json:",omitempty"`
- Returns string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Returns []string `json:",omitempty"`
Size int
ArgEnum []string `json:",omitempty"`
- ArgEnumTypes string `json:",omitempty"`
+ ArgEnumTypes []string `json:",omitempty"`
Doc string
- DocExtra string `json:",omitempty"`
- ImmediateNote string `json:",omitempty"`
+ DocExtra string `json:",omitempty"`
+ ImmediateNote []logic.OpImmediateDetails `json:",omitempty"`
IntroducedVersion uint64
Groups []string
}
+type namedType struct {
+ Name string
+ Abbreviation string
+ Bound []uint64
+ AVMType string
+}
+
+func (nt namedType) boundString() string {
+ if nt.Bound[0] == 0 && nt.Bound[1] == 0 {
+ return ""
+ }
+
+ val := "x"
+ // if it's bytes, the length is bounded
+ if nt.AVMType == "[]byte" {
+ val = "len(x)"
+ }
+
+ // If they're equal, the val should match exactly
+ if nt.Bound[0] > 0 && nt.Bound[0] == nt.Bound[1] {
+ return fmt.Sprintf("%s == %d", val, nt.Bound[0])
+ }
+
+ // otherwise, provide min/max bounds as a <= expression
+ minBound, maxBound := "", ""
+ if nt.Bound[0] > 0 {
+ minBound = fmt.Sprintf("%d <= ", nt.Bound[0])
+ }
+ if nt.Bound[1] > 0 {
+ maxBound = fmt.Sprintf(" <= %d", nt.Bound[1])
+ }
+
+ return fmt.Sprintf("%s%s%s", minBound, val, maxBound)
+
+}
+
// LanguageSpec records the ops of the language at some version
type LanguageSpec struct {
EvalMaxVersion int
LogicSigVersion uint64
+ NamedTypes []namedType
Ops []OpRecord
}
-func typeString(types []logic.StackType) string {
- out := make([]byte, len(types))
- for i, t := range types {
- switch t {
- case logic.StackUint64:
- out[i] = 'U'
- case logic.StackBytes:
- out[i] = 'B'
- case logic.StackAny:
- out[i] = '.'
- case logic.StackNone:
- out[i] = '_'
- default:
- panic("unexpected type in opdoc typeString")
+func typeStrings(types logic.StackTypes) []string {
+ out := make([]string, len(types))
+ allNones := true
+ for idx, t := range types {
+ out[idx] = t.String()
+ if out[idx] != "none" {
+ allNones = false
}
}
- // Cant return None and !None from same op
- if strings.Contains(string(out), "_") {
- if strings.ContainsAny(string(out), "UB.") {
- panic("unexpected StackNone in opdoc typeString")
- }
- return ""
+ // If all the types are none, return an empty
+ // slice; otherwise leave the nones in so we
+ // don't break the indices by omitting a valid
+ // none in a fields array
+ if allNones {
+ return nil
}
- return string(out)
+ return out
}
-func fieldsAndTypes(group logic.FieldGroup) ([]string, string) {
+func fieldsAndTypes(group logic.FieldGroup) ([]string, []string) {
// reminder: group.Names can be "sparse" See: logic.TxnaFields
fields := make([]string, 0, len(group.Names))
types := make([]logic.StackType, 0, len(group.Names))
@@ -297,10 +379,10 @@ func fieldsAndTypes(group logic.FieldGroup) ([]string, string) {
types = append(types, spec.Type())
}
}
- return fields, typeString(types)
+ return fields, typeStrings(types)
}
-func argEnums(name string) ([]string, string) {
+func argEnums(name string) ([]string, []string) {
// reminder: this needs to be manually updated every time
// a new opcode is added with an associated FieldGroup
// it'd be nice to have this auto-update
@@ -334,29 +416,31 @@ func argEnums(name string) ([]string, string) {
case "ecdsa_pk_recover", "ecdsa_verify", "ecdsa_pk_decompress":
return fieldsAndTypes(logic.EcdsaCurves)
default:
- return nil, ""
+ return nil, nil
}
}
-func buildLanguageSpec(opGroups map[string][]string) *LanguageSpec {
+func buildLanguageSpec(opGroups map[string][]string, namedTypes []namedType) *LanguageSpec {
opSpecs := logic.OpcodesByVersion(uint64(docVersion))
records := make([]OpRecord, len(opSpecs))
for i, spec := range opSpecs {
records[i].Opcode = spec.Opcode
records[i].Name = spec.Name
- records[i].Args = typeString(spec.Arg.Types)
- records[i].Returns = typeString(spec.Return.Types)
+ records[i].Args = typeStrings(spec.Arg.Types)
+ records[i].Returns = typeStrings(spec.Return.Types)
records[i].Size = spec.OpDetails.Size
records[i].ArgEnum, records[i].ArgEnumTypes = argEnums(spec.Name)
records[i].Doc = strings.ReplaceAll(logic.OpDoc(spec.Name), "<br />", "\n")
records[i].DocExtra = logic.OpDocExtra(spec.Name)
- records[i].ImmediateNote = logic.OpImmediateNote(spec.Name)
+ records[i].ImmediateNote = logic.OpImmediateDetailsFromSpec(spec)
records[i].Groups = opGroups[spec.Name]
records[i].IntroducedVersion = spec.Version
}
+
return &LanguageSpec{
EvalMaxVersion: docVersion,
LogicSigVersion: config.Consensus[protocol.ConsensusCurrentVersion].LogicSigVersion,
+ NamedTypes: namedTypes,
Ops: records,
}
}
@@ -389,6 +473,21 @@ func main() {
integerConstantsTableMarkdown(constants)
constants.Close()
+ named := make([]namedType, 0, len(logic.AllStackTypes))
+ for abbr, t := range logic.AllStackTypes {
+ named = append(named, namedType{
+ Name: t.String(),
+ Bound: []uint64{t.Bound[0], t.Bound[1]},
+ Abbreviation: string(abbr),
+ AVMType: t.AVMType.String(),
+ })
+ }
+ sort.Slice(named, func(i, j int) bool { return strings.Compare(named[i].Name, named[j].Name) > 0 })
+
+ namedStackTypes := create("named_stack_types.md")
+ namedStackTypesMarkdown(namedStackTypes, named)
+ namedStackTypes.Close()
+
written := make(map[string]bool)
opSpecs := logic.OpcodesByVersion(uint64(docVersion))
for _, spec := range opSpecs {
@@ -405,7 +504,10 @@ func main() {
langspecjs := create("langspec.json")
enc := json.NewEncoder(langspecjs)
enc.SetIndent("", " ")
- enc.Encode(buildLanguageSpec(opGroups))
+ err := enc.Encode(buildLanguageSpec(opGroups, named))
+ if err != nil {
+ panic(err.Error())
+ }
langspecjs.Close()
tealtm := create("teal.tmLanguage.json")
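Tracing boundString with hypothetical bounds makes its branches concrete: a uint64 type with Bound{1, 64} renders as "1 <= x <= 64"; a []byte type with Bound{0, 4096} renders as "len(x) <= 4096"; a []byte type with Bound{32, 32} renders as "len(x) == 32"; and Bound{0, 0} renders as the empty string, meaning unbounded.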
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index 7813d4f31..fe545f174 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -438,7 +438,9 @@ var runCmd = &cobra.Command{
cfg.GeneratedAccountSampleMethod = generatedAccountSampleMethod
}
// check if numAccounts is greater than the length of the mnemonic list, if provided
- if cfg.DeterministicKeys && cfg.NumPartAccounts > uint32(len(cfg.GeneratedAccountsMnemonics)) {
+ if cfg.DeterministicKeys &&
+ len(cfg.GeneratedAccountsMnemonics) > 0 &&
+ cfg.NumPartAccounts > uint32(len(cfg.GeneratedAccountsMnemonics)) {
reportErrorf("numAccounts is greater than number of account mnemonics provided")
}
diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go
index c76039c7b..03cf088f4 100644
--- a/cmd/tealdbg/localLedger.go
+++ b/cmd/tealdbg/localLedger.go
@@ -285,6 +285,10 @@ func (l *localLedger) BlockHdrCached(basics.Round) (bookkeeping.BlockHeader, err
return bookkeeping.BlockHeader{}, nil
}
+func (l *localLedger) GetStateProofVerificationContext(_ basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ return nil, fmt.Errorf("localLedger: GetStateProofVerificationContext, needed for state proof verification, is not implemented in debugger")
+}
+
func (l *localLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return nil
}
diff --git a/cmd/tealdbg/localLedger_test.go b/cmd/tealdbg/localLedger_test.go
index 05640abd4..623215b46 100644
--- a/cmd/tealdbg/localLedger_test.go
+++ b/cmd/tealdbg/localLedger_test.go
@@ -83,7 +83,7 @@ int 2
a.NoError(err)
assetIdx := basics.AssetIndex(50)
- appIdx := basics.AppIndex(100)
+ appIdx := basics.AppIndex(1001)
br := makeSampleBalanceRecord(addr, assetIdx, appIdx)
balances := map[basics.Address]basics.AccountData{
addr: br.AccountData,
diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go
index 01d5ca3b5..d0f6a3c2c 100644
--- a/cmd/tealdbg/local_test.go
+++ b/cmd/tealdbg/local_test.go
@@ -523,7 +523,7 @@ int 100
ProgramBlobs: [][]byte{[]byte(source)},
BalanceBlob: balanceBlob,
TxnBlob: txnBlob,
- Proto: string(protocol.ConsensusCurrentVersion),
+ Proto: string(protocol.ConsensusV37),
Round: 222,
LatestTimestamp: 333,
GroupIndex: 0,
diff --git a/cmd/updater/version_test.go b/cmd/updater/version_test.go
index 62954d51c..e46bd2f3e 100644
--- a/cmd/updater/version_test.go
+++ b/cmd/updater/version_test.go
@@ -29,10 +29,11 @@ func TestGetVersion(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testValidVersion(t, "algonode_update_0.1.0.log", uint64(0x00010000))
- testValidVersion(t, "algo_update_0.1.0", uint64(0x00010000))
- testValidVersion(t, "algo_update_65535.1.0", uint64(0xFFFF00010000))
- testValidVersion(t, "algo_update_65535.65535.65535", uint64(0xFFFFFFFFFFFF))
+ testValidVersion(t, "algonode_update_0.1.0.log", uint64(0x01000000))
+ testValidVersion(t, "algo_update_0.1.0", uint64(0x01000000))
+ testValidVersion(t, "algo_update_65535.1.0", uint64(0x00FFFF0001000000))
+ testValidVersion(t, "algo_update_65535.65535.65535", uint64(0xFFFFFFFF00FFFF))
+ testValidVersion(t, "algo_update_65535.65535.16777215", uint64(0xFFFFFFFFFFFFFF))
testInvalidVersion(t, "algo_update_0.-1.0")
testInvalidVersion(t, "algo_update_1e5.0.0")
diff --git a/config/config.go b/config/config.go
index a180a9a47..1da9dd059 100644
--- a/config/config.go
+++ b/config/config.go
@@ -72,7 +72,7 @@ const StateProofFileName = "stateproof.sqlite"
// It is used for tracking participation key metadata.
const ParticipationRegistryFilename = "partregistry.sqlite"
-// ConfigurableConsensusProtocolsFilename defines a set of consensus prototocols that
+// ConfigurableConsensusProtocolsFilename defines a set of consensus protocols that
// are to be loaded from the data directory ( if present ), to override the
// built-in supported consensus protocols.
const ConfigurableConsensusProtocolsFilename = "consensus.json"
diff --git a/config/consensus.go b/config/consensus.go
index f6e831493..4f4680c5e 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -487,6 +487,31 @@ type ConsensusParams struct {
// EnablePrecheckECDSACurve means that ecdsa_verify opcode will bail early,
// returning false, if pubkey is not on the curve.
EnablePrecheckECDSACurve bool
+
+ // EnableBareBudgetError specifies that I/O budget overruns should not be considered EvalError
+ EnableBareBudgetError bool
+
+ // StateProofUseTrackerVerification specifies whether the node will use data from state proof verification tracker
+ // in order to verify state proofs.
+ StateProofUseTrackerVerification bool
+
+ // EnableCatchpointsWithSPContexts specifies when to re-enable version 7 catchpoints.
+ // Version 7 includes state proof verification contexts
+ EnableCatchpointsWithSPContexts bool
+
+ // AppForbidLowResources enforces a rule that prevents apps from accessing
+ // ASAs and apps below 256, in an effort to decrease the ambiguity of
+ // opcodes that accept IDs or slot indexes. Simultaneously, the first ID
+ // allocated in new chains is raised to 1001.
+ AppForbidLowResources bool
+
+ // EnableBoxRefNameError specifies that box ref names should be validated early
+ EnableBoxRefNameError bool
+
+ // ExcludeExpiredCirculation excludes expired stake from the total online stake
+ // used by agreement for Circulation, and updates the calculation of StateProofOnlineTotalWeight used
+ // by state proofs to use the same method (rather than excluding stake from the top N stakeholders as before).
+ ExcludeExpiredCirculation bool
}
// PaysetCommitType enumerates possible ways for the block header to commit to
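A cross-reference worth noting: AppForbidLowResources is why the tealdbg test earlier in this diff moves its sample app index from 100 to 1001; new chains now allocate their first ID at 1001, so tests must stay above the low-resource cutoff.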
@@ -654,7 +679,7 @@ func (cp ConsensusProtocols) Merge(configurableConsensus ConsensusProtocols) Con
for cVer, cParam := range staticConsensus {
if cVer == consensusVersion {
delete(staticConsensus, cVer)
- } else if _, has := cParam.ApprovedUpgrades[consensusVersion]; has {
+ } else {
// delete upgrade to deleted version
delete(cParam.ApprovedUpgrades, consensusVersion)
}
@@ -1247,13 +1272,46 @@ func initConsensusProtocols() {
v35.ApprovedUpgrades[protocol.ConsensusV36] = 140000
+ v37 := v36
+ v37.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ Consensus[protocol.ConsensusV37] = v37
+
+ // v36 can be upgraded to v37, with an update delay of 7 days ( see calculation above )
+ v36.ApprovedUpgrades[protocol.ConsensusV37] = 140000
+
+ v38 := v37
+ v38.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ // enables state proof recoverability
+ v38.StateProofUseTrackerVerification = true
+ v38.EnableCatchpointsWithSPContexts = true
+
+ // online circulation on-demand expiration
+ v38.ExcludeExpiredCirculation = true
+
+ // TEAL resources sharing and other features
+ v38.LogicSigVersion = 9
+ v38.EnablePrecheckECDSACurve = true
+ v38.AppForbidLowResources = true
+ v38.EnableBareBudgetError = true
+ v38.EnableBoxRefNameError = true
+
+ v38.AgreementFilterTimeoutPeriod0 = 3000 * time.Millisecond
+
+ Consensus[protocol.ConsensusV38] = v38
+
+ // v37 can be upgraded to v38, with an update delay of 12h:
+ // 10046 = (12 * 60 * 60 / 4.3)
+ // for the sake of future manual calculations, we'll round that down a bit:
+ v37.ApprovedUpgrades[protocol.ConsensusV38] = 10000
+
// ConsensusFuture is used to test features that are implemented
// but not yet released in a production protocol version.
- vFuture := v36
+ vFuture := v38
vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- vFuture.LogicSigVersion = 9 // When moving this to a release, put a new higher LogicSigVersion here
- vFuture.EnablePrecheckECDSACurve = true
+ vFuture.LogicSigVersion = 10 // When moving this to a release, put a new higher LogicSigVersion here
Consensus[protocol.ConsensusFuture] = vFuture
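Sanity check on the delay constants above: 140000 rounds at about 4.3 s per round is 602000 s, close to the 7 days cited for the v36-to-v37 upgrade, while the rounded-down 10000 rounds is about 43000 s, just under the 12 hours cited for v37-to-v38.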
diff --git a/config/localTemplate.go b/config/localTemplate.go
index 95a8a493b..a9dd313bb 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -506,6 +506,10 @@ type Local struct {
// and APIs related to broadcasting transactions, and enables APIs which can retrieve detailed information
// from ledger caches and can control the ledger round.
EnableFollowMode bool `version[27]:"false"`
+
+ // EnableTxnEvalTracer turns on features in the BlockEvaluator which collect data on transactions, exposing them via algod APIs.
+ // It will store txn deltas created during block evaluation, potentially consuming much larger amounts of memory,
+ EnableTxnEvalTracer bool `version[27]:"false"`
}
// DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 48ddae4be..81e4a2587 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -75,6 +75,7 @@ var defaultLocal = Local{
EnableRuntimeMetrics: false,
EnableTopAccountsReporting: false,
EnableTxBacklogRateLimiting: false,
+ EnableTxnEvalTracer: false,
EnableUsageLog: false,
EnableVerbosedTransactionSyncLogging: false,
EndpointAddress: "127.0.0.1:0",
diff --git a/config/migrate.go b/config/migrate.go
index 35b4ec82f..11c8aa108 100644
--- a/config/migrate.go
+++ b/config/migrate.go
@@ -128,9 +128,6 @@ func getLatestConfigVersion() uint32 {
}
func getVersionedDefaultLocalConfig(version uint32) (local Local) {
- if version < 0 {
- return
- }
if version > 0 {
local = getVersionedDefaultLocalConfig(version - 1)
}
diff --git a/config/version.go b/config/version.go
index 06701e9f1..2e274a552 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 15
+const VersionMinor = 16
// Version is the type holding our full version information.
type Version struct {
@@ -72,7 +72,7 @@ func (v Version) AsUInt64() (versionInfo uint64) {
versionInfo = uint64(v.Major)
versionInfo <<= 16
versionInfo |= uint64(v.Minor)
- versionInfo <<= 16
+ versionInfo <<= 24
versionInfo |= uint64(v.BuildNumber)
return
}
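Worked example of the widened packing: Version{Major: 3, Minor: 16, BuildNumber: 100} yields (3<<16 | 16) = 0x30010, then (0x30010<<24 | 100) = 0x30010000064; that is 16 bits of major, 16 bits of minor, and now 24 bits of build number, which is exactly the layout the updated updater tests earlier in this diff assert.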
diff --git a/config/version_test.go b/config/version_test.go
new file mode 100644
index 000000000..1a37cf185
--- /dev/null
+++ b/config/version_test.go
@@ -0,0 +1,60 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package config
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/s3"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAlgodVsUpdatedVersions(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ tests := []struct {
+ major int
+ minor int
+ build int
+ }{
+ {major: 1, minor: 1, build: 32111},
+ {major: 2, minor: 0, build: 0},
+ {major: 3, minor: 13, build: 170018},
+ {major: 3, minor: 15, build: 157},
+ {major: 3, minor: 16, build: 0},
+ {major: 3, minor: 16, build: 100},
+ }
+
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("%d.%d.%d", tt.major, tt.minor, tt.build), func(t *testing.T) {
+ version := Version{Major: tt.major, Minor: tt.minor, BuildNumber: tt.build}
+ str := version.String()
+ ver, err := s3.GetVersionFromName("_" + str)
+ require.NoError(t, err)
+ require.Equal(t, version.AsUInt64(), ver)
+ major, minor, patch, err := s3.GetVersionPartsFromVersion(ver)
+ require.NoError(t, err)
+ require.Equal(t, uint64(tt.major), major)
+ require.Equal(t, uint64(tt.minor), minor)
+ require.Equal(t, uint64(tt.build), patch)
+
+ })
+ }
+}
diff --git a/crypto/merklesignature/keysBuilder.go b/crypto/merklesignature/keysBuilder.go
index d284ca29b..50498b519 100644
--- a/crypto/merklesignature/keysBuilder.go
+++ b/crypto/merklesignature/keysBuilder.go
@@ -84,7 +84,7 @@ func calculateRanges(numberOfKeys uint64) (numOfKeysPerRoutine uint64, numOfRout
func generateKeysForRange(ctx context.Context, startIdx uint64, endIdx uint64, keys []crypto.FalconSigner) error {
for k := startIdx; k < endIdx; k++ {
if ctx.Err() != nil {
- break
+ return nil //nolint:nilerr // we don't need to return the ctx error, since the other goroutine will report it.
}
sigAlgo, err := crypto.NewFalconSigner()
if err != nil {
diff --git a/crypto/merkletrie/cache.go b/crypto/merkletrie/cache.go
index 208397edb..01648d73a 100644
--- a/crypto/merkletrie/cache.go
+++ b/crypto/merkletrie/cache.go
@@ -39,13 +39,13 @@ const (
// be found in neither the in-memory cache nor on the persistent storage.
var ErrLoadedPageMissingNode = errors.New("loaded page is missing a node")
-// ErrPageDecodingFailuire is returned if the decoding of a page has failed.
-var ErrPageDecodingFailuire = errors.New("error encountered while decoding page")
+// ErrPageDecodingFailure is returned if the decoding of a page has failed.
+var ErrPageDecodingFailure = errors.New("error encountered while decoding page")
type merkleTrieCache struct {
- // mt is a point to the originating trie
+ // mt is a pointer to the originating trie
mt *Trie
- // committer is the backing up storage for the cache. ( memory, database, etc. )
+ // committer is the backing store for the cache. ( memory, database, etc. )
committer Committer
// cachedNodeCount is the number of currently cached, in-memory, nodes stored in the pageToNIDsPtr structure.
cachedNodeCount int
@@ -293,7 +293,7 @@ func (mtc *merkleTrieCache) beginTransaction() {
mtc.txNextNodeID = mtc.mt.nextNodeID
}
-// commitTransaction - used internaly by the Trie
+// commitTransaction - used internally by the Trie
func (mtc *merkleTrieCache) commitTransaction() {
// the created nodes are already on the list.
for nodeID := range mtc.txCreatedNodeIDs {
@@ -426,7 +426,7 @@ func (mtc *merkleTrieCache) commit() (CommitStats, error) {
// reallocatePendingPages is called by the commit() function, and is responsible for performing two tasks -
// 1. calculate the hashes of all the newly created nodes
-// 2. reornigize the pending flush nodes into an optimal page list, and construct a list of pages that need to be created, deleted and updated.
+// 2. reorganize the pending flush nodes into an optimal page list, and construct a list of pages that need to be created, deleted and updated.
func (mtc *merkleTrieCache) reallocatePendingPages(stats *CommitStats) (pagesToCreate []uint64, pagesToDelete map[uint64]bool, pagesToUpdate map[uint64]map[storedNodeIdentifier]*node, err error) {
// newPageThreshold is the threshold at which all the pages are newly created pages that were never committed.
newPageThreshold := uint64(mtc.mt.lastCommittedNodeID) / uint64(mtc.nodesPerPage)
@@ -669,26 +669,26 @@ func (mtc *merkleTrieCache) reallocateNode(nid storedNodeIdentifier) storedNodeI
func decodePage(bytes []byte) (nodesMap map[storedNodeIdentifier]*node, err error) {
version, versionLength := binary.Uvarint(bytes[:])
if versionLength <= 0 {
- return nil, ErrPageDecodingFailuire
+ return nil, ErrPageDecodingFailure
}
- if version != NodePageVersion {
- return nil, ErrPageDecodingFailuire
+ if version != nodePageVersion {
+ return nil, ErrPageDecodingFailure
}
nodesCount, nodesCountLength := binary.Varint(bytes[versionLength:])
if nodesCountLength <= 0 {
- return nil, ErrPageDecodingFailuire
+ return nil, ErrPageDecodingFailure
}
nodesMap = make(map[storedNodeIdentifier]*node)
walk := nodesCountLength + versionLength
for i := int64(0); i < nodesCount; i++ {
nodeID, nodesIDLength := binary.Uvarint(bytes[walk:])
if nodesIDLength <= 0 {
- return nil, ErrPageDecodingFailuire
+ return nil, ErrPageDecodingFailure
}
walk += nodesIDLength
pnode, nodeLength := deserializeNode(bytes[walk:])
if nodeLength <= 0 {
- return nil, ErrPageDecodingFailuire
+ return nil, ErrPageDecodingFailure
}
walk += nodeLength
nodesMap[storedNodeIdentifier(nodeID)] = pnode
@@ -699,7 +699,7 @@ func decodePage(bytes []byte) (nodesMap map[storedNodeIdentifier]*node, err erro
// encodePage encodes the page contents into a byte array
func (mtc *merkleTrieCache) encodePage(nodeIDs map[storedNodeIdentifier]*node, serializedBuffer []byte) []byte {
- version := binary.PutUvarint(serializedBuffer[:], NodePageVersion)
+ version := binary.PutUvarint(serializedBuffer[:], nodePageVersion)
length := binary.PutVarint(serializedBuffer[version:], int64(len(nodeIDs)))
walk := version + length
for nodeID, pnode := range nodeIDs {
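
For orientation, the page format that decodePage and encodePage agree on is a uvarint page version, a varint node count, then that many (uvarint node ID, serialized node) pairs. A standalone sketch of parsing just the header, mirroring the error checks above (the version constant is copied from this diff):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

const nodePageVersion = uint64(0x1000000010000000)

var errPageDecoding = errors.New("error encountered while decoding page")

// readPageHeader returns the node count and the offset where node data begins.
func readPageHeader(buf []byte) (nodesCount int64, offset int, err error) {
	version, versionLen := binary.Uvarint(buf)
	if versionLen <= 0 || version != nodePageVersion {
		return 0, 0, errPageDecoding
	}
	nodesCount, countLen := binary.Varint(buf[versionLen:])
	if countLen <= 0 {
		return 0, 0, errPageDecoding
	}
	return nodesCount, versionLen + countLen, nil
}

func main() {
	buf := make([]byte, 2*binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, nodePageVersion)
	n += binary.PutVarint(buf[n:], 3)
	count, off, err := readPageHeader(buf[:n])
	fmt.Println(count, off, err) // 3, the header length, <nil>
}
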
diff --git a/crypto/merkletrie/cache_test.go b/crypto/merkletrie/cache_test.go
index d9c7a23e3..5967f0114 100644
--- a/crypto/merkletrie/cache_test.go
+++ b/crypto/merkletrie/cache_test.go
@@ -298,7 +298,7 @@ func (mt *Trie) TestDeleteRollback(d []byte) (bool, error) {
if err != nil {
return false, err
}
- found, err := pnode.find(mt.cache, d[:])
+ found, err := pnode.find(&mt.cache, d[:])
if !found || err != nil {
return false, err
}
@@ -311,7 +311,7 @@ func (mt *Trie) TestDeleteRollback(d []byte) (bool, error) {
mt.elementLength = 0
return true, nil
}
- _, err = pnode.remove(mt.cache, d[:], make([]byte, 0, len(d)))
+ _, err = pnode.remove(&mt.cache, d[:], make([]byte, 0, len(d)))
// unlike the "real" function, we always want to fail here to test the rollbackTransaction() functionality.
mt.cache.rollbackTransaction()
return false, fmt.Errorf("this is a test for failing a Delete request")
diff --git a/crypto/merkletrie/committer.go b/crypto/merkletrie/committer.go
index bad5fe7e0..5e5ae758a 100644
--- a/crypto/merkletrie/committer.go
+++ b/crypto/merkletrie/committer.go
@@ -26,7 +26,7 @@ const (
inMemoryCommitterPageSize = int64(512)
)
-// InMemoryCommitter is a fully function in-memory committer, supporting
+// InMemoryCommitter is a fully functional in-memory committer, supporting
// persistence of pages.
type InMemoryCommitter struct {
memStore map[uint64][]byte
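
InMemoryCommitter backs the cache with a plain map from page index to page bytes. As a hedged sketch, the Committer contract it satisfies is assumed (from how the cache uses its committer) to be page-granular store/load along these lines:

package main

import "fmt"

// committer is a hypothetical stand-in for merkletrie.Committer; the real
// interface is assumed to expose page-granular store/load like this.
type committer interface {
	StorePage(page uint64, content []byte) error
	LoadPage(page uint64) ([]byte, error)
}

type memCommitter struct {
	memStore map[uint64][]byte
}

func (m *memCommitter) StorePage(page uint64, content []byte) error {
	if m.memStore == nil {
		m.memStore = make(map[uint64][]byte)
	}
	if content == nil {
		delete(m.memStore, page) // in this sketch, a nil store is a delete
		return nil
	}
	stored := make([]byte, len(content))
	copy(stored, content)
	m.memStore[page] = stored
	return nil
}

func (m *memCommitter) LoadPage(page uint64) ([]byte, error) {
	return m.memStore[page], nil // nil slice means "no such page"
}

func main() {
	var c committer = &memCommitter{}
	c.StorePage(7, []byte{1, 2, 3})
	page, _ := c.LoadPage(7)
	fmt.Println(page)
}
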
diff --git a/crypto/merkletrie/committer_test.go b/crypto/merkletrie/committer_test.go
index 6f8dfb2a7..c8bfd7143 100644
--- a/crypto/merkletrie/committer_test.go
+++ b/crypto/merkletrie/committer_test.go
@@ -140,18 +140,18 @@ func TestNoRedundentPages(t *testing.T) {
require.Equal(t, nodesCount, mt1.cache.cachedNodeCount)
}
-// decodePage decodes a byte array into a page content
+// decodePageHeaderSize decodes a page header at the start of a byte array
func decodePageHeaderSize(bytes []byte) (headerSize int, err error) {
version, versionLength := binary.Uvarint(bytes[:])
if versionLength <= 0 {
- return 0, ErrPageDecodingFailuire
+ return 0, ErrPageDecodingFailure
}
- if version != NodePageVersion {
- return 0, ErrPageDecodingFailuire
+ if version != nodePageVersion {
+ return 0, ErrPageDecodingFailure
}
_, nodesCountLength := binary.Varint(bytes[versionLength:])
if nodesCountLength <= 0 {
- return 0, ErrPageDecodingFailuire
+ return 0, ErrPageDecodingFailure
}
return nodesCountLength + versionLength, nil
}
diff --git a/crypto/merkletrie/node.go b/crypto/merkletrie/node.go
index 33c2f673a..de765793d 100644
--- a/crypto/merkletrie/node.go
+++ b/crypto/merkletrie/node.go
@@ -140,8 +140,9 @@ func (n *node) add(cache *merkleTrieCache, d []byte, path []byte) (nodeID stored
}
pnode.hash = append(path, d[:idiff]...)
+ // create ancestors from pnode up to the new split
for i := idiff - 1; i >= 0; i-- {
- // create a parent node for pnode.
+ // create a parent node for pnode, and move up
pnode2, nodeID2 := cache.allocateNewNode()
pnode2.childrenMask.SetBit(d[i])
pnode2.children = []childEntry{
@@ -152,7 +153,6 @@ func (n *node) add(cache *merkleTrieCache, d []byte, path []byte) (nodeID stored
}
pnode2.hash = append(path, d[:i]...)
- pnode = pnode2
nodeID = nodeID2
}
return nodeID, nil
@@ -160,16 +160,14 @@ func (n *node) add(cache *merkleTrieCache, d []byte, path []byte) (nodeID stored
if n.childrenMask.Bit(d[0]) == false {
// no such child.
- var childNode *node
- var childNodeID storedNodeIdentifier
- childNode, childNodeID = cache.allocateNewNode()
+ childNode, childNodeID := cache.allocateNewNode()
childNode.hash = d[1:]
pnode, nodeID = cache.allocateNewNode()
pnode.childrenMask = n.childrenMask
pnode.childrenMask.SetBit(d[0])
- pnode.children = make([]childEntry, len(n.children)+1, len(n.children)+1)
+ pnode.children = make([]childEntry, len(n.children)+1)
if d[0] > n.children[len(n.children)-1].hashIndex {
// the new entry comes after all the existing ones.
for i, child := range n.children {
@@ -183,8 +181,8 @@ func (n *node) add(cache *merkleTrieCache, d []byte, path []byte) (nodeID stored
for i, child := range n.children {
if d[0] < child.hashIndex {
pnode.children[i] = childEntry{
- hashIndex: d[0],
id: childNodeID,
+ hashIndex: d[0],
}
// copy the rest of the items.
for ; i < len(n.children); i++ {
@@ -211,7 +209,7 @@ func (n *node) add(cache *merkleTrieCache, d []byte, path []byte) (nodeID stored
pnode, nodeID = childNode, cache.refurbishNode(curNodeID)
pnode.childrenMask = n.childrenMask
if len(pnode.children) < len(n.children) {
- pnode.children = make([]childEntry, len(n.children), len(n.children))
+ pnode.children = make([]childEntry, len(n.children))
} else {
pnode.children = pnode.children[:len(n.children)]
}
@@ -270,7 +268,7 @@ func (n *node) remove(cache *merkleTrieCache, key []byte, path []byte) (nodeID s
pnode, nodeID = childNode, cache.refurbishNode(childNodeID)
pnode.childrenMask = n.childrenMask
// we are guaranteed to have other children, because our tree forbids nodes that have exactly one leaf child and no other children.
- pnode.children = make([]childEntry, len(n.children)-1, len(n.children)-1)
+ pnode.children = make([]childEntry, len(n.children)-1)
copy(pnode.children, append(n.children[:childIndex], n.children[childIndex+1:]...))
pnode.childrenMask.ClearBit(key[0])
} else {
@@ -283,7 +281,7 @@ func (n *node) remove(cache *merkleTrieCache, key []byte, path []byte) (nodeID s
pnode, nodeID = childNode, cache.refurbishNode(childNodeID)
pnode.childrenMask = n.childrenMask
if len(pnode.children) < len(n.children) {
- pnode.children = make([]childEntry, len(n.children), len(n.children))
+ pnode.children = make([]childEntry, len(n.children))
} else {
pnode.children = pnode.children[:len(n.children)]
}
@@ -371,7 +369,7 @@ func deserializeNode(buf []byte) (n *node, s int) {
prevChildIndex = childIndex
i++
}
- n.children = make([]childEntry, i, i)
+ n.children = make([]childEntry, i)
copy(n.children, childEntries[:i])
return
}
diff --git a/crypto/merkletrie/node_test.go b/crypto/merkletrie/node_test.go
index 1495a8e9c..893fbc86b 100644
--- a/crypto/merkletrie/node_test.go
+++ b/crypto/merkletrie/node_test.go
@@ -17,6 +17,8 @@
package merkletrie
import (
+ "crypto/sha512"
+ "encoding/binary"
"testing"
"github.com/stretchr/testify/require"
@@ -67,17 +69,24 @@ func (n *node) leafUsingChildrenLength() bool {
return len(n.children) == 0
}
+func makeHashes(n int) [][]byte {
+ hashes := make([][]byte, n)
+ for i := 0; i < len(hashes); i++ {
+ buf := make([]byte, 32)
+ binary.BigEndian.PutUint64(buf, uint64(i))
+ h := crypto.Hash(buf)
+ hashes[i] = h[:]
+ }
+ return hashes
+}
+
func BenchmarkNodeLeafImplementation(b *testing.B) {
+ hashes := makeHashes(100000)
+
b.Run("leaf-ChildrenMask", func(b *testing.B) {
var memoryCommitter InMemoryCommitter
memConfig := defaultTestMemoryConfig
mt1, _ := MakeTrie(&memoryCommitter, memConfig)
- // create 100000 hashes.
- leafsCount := 100000
- hashes := make([]crypto.Digest, leafsCount)
- for i := 0; i < len(hashes); i++ {
- hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
- }
for i := 0; i < len(hashes); i++ {
mt1.Add(hashes[i][:])
@@ -100,12 +109,6 @@ func BenchmarkNodeLeafImplementation(b *testing.B) {
var memoryCommitter InMemoryCommitter
memConfig := defaultTestMemoryConfig
mt1, _ := MakeTrie(&memoryCommitter, memConfig)
- // create 100000 hashes.
- leafsCount := 100000
- hashes := make([]crypto.Digest, leafsCount)
- for i := 0; i < len(hashes); i++ {
- hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
- }
for i := 0; i < len(hashes); i++ {
mt1.Add(hashes[i][:])
@@ -125,3 +128,76 @@ func BenchmarkNodeLeafImplementation(b *testing.B) {
}
})
}
+
+// calculateHashIncrementally uses the Writer interface to the crypto digest to
+// avoid accumulating in a buffer. Yet it's slower! I don't know why, but I'm
+// leaving it here to benchmark more carefully later. (The final use of
+// d.Sum(nil) instead of d.Sum(n.hash[:0]) is needed because we share the
+// backing array for the slices in node hashes. But that is not the cause of the
+// slowdown.)
+func (n *node) calculateHashIncrementally(cache *merkleTrieCache) error {
+ if n.leaf() {
+ return nil
+ }
+ path := n.hash
+
+ d := sha512.New512_256()
+
+	// we add this string length before the actual string so it can be "decoded"; in practice, it makes a good domain separator.
+ d.Write([]byte{byte(len(path))})
+ d.Write(path)
+ for _, child := range n.children {
+ childNode, err := cache.getNode(child.id)
+ if err != nil {
+ return err
+ }
+ if childNode.leaf() {
+ d.Write([]byte{0})
+ } else {
+ d.Write([]byte{1})
+ }
+		// we add this string length before the actual string so it can be "decoded"; in practice, it makes a good domain separator.
+ d.Write([]byte{byte(len(childNode.hash))})
+ d.Write([]byte{child.hashIndex}) // adding the first byte of the child
+		d.Write(childNode.hash)         // adding the remainder of the child
+ }
+ n.hash = d.Sum(nil)
+ return nil
+}
+
+func BenchmarkAdd(b *testing.B) {
+ b.ReportAllocs()
+
+ memConfig := defaultTestMemoryConfig
+ mt, _ := MakeTrie(&InMemoryCommitter{}, memConfig)
+ hashes := makeHashes(b.N)
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ mt.Add(hashes[i])
+ if i%1000 == 999 {
+ mt.Commit() // not sure how often we should Commit for a nice benchmark
+ }
+ }
+}
+
+func BenchmarkDelete(b *testing.B) {
+ b.ReportAllocs()
+
+ memConfig := defaultTestMemoryConfig
+ mt, _ := MakeTrie(&InMemoryCommitter{}, memConfig)
+ hashes := makeHashes(b.N)
+ for i := 0; i < b.N; i++ {
+ mt.Add(hashes[i])
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ mt.Delete(hashes[i])
+ if i%1000 == 999 { // not sure how often we should Commit for a nice benchmark
+ mt.Commit()
+ }
+ }
+}
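
The d.Sum(nil) caveat in calculateHashIncrementally deserves a concrete illustration: hash.Hash.Sum appends the digest to the slice it receives, so summing into a slice whose capacity overlaps neighboring node hashes silently overwrites them. A standalone demonstration (not repo code):

package main

import (
	"crypto/sha512"
	"fmt"
)

func main() {
	// Two "node hashes" sharing one backing array, as trie nodes do.
	backing := make([]byte, 64)
	copy(backing[32:], []byte("neighboring node hash, 32 bytes!"))
	neighbor := backing[32:64]

	d := sha512.New512_256()
	d.Write([]byte("some node content"))

	// Sum appends to its argument. Appending a 32-byte digest to a
	// length-8 prefix of the backing array spills into the neighbor.
	_ = d.Sum(backing[:8])
	fmt.Printf("clobbered: %q\n", neighbor[:8])

	// Sum(nil) allocates a fresh slice instead, leaving neighbors intact.
	fresh := d.Sum(nil)
	fmt.Println(len(fresh)) // 32
}
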
diff --git a/crypto/merkletrie/trie.go b/crypto/merkletrie/trie.go
index 7a214e709..39051699f 100644
--- a/crypto/merkletrie/trie.go
+++ b/crypto/merkletrie/trie.go
@@ -24,16 +24,16 @@ import (
)
const (
- // MerkleTreeVersion is the version of the encoded trie. If we ever want to make changes and want to have upgrade path,
+	// merkleTreeVersion is the version of the encoded trie. If we ever want to make changes and want to have an upgrade path,
// this would give us the ability to do so.
- MerkleTreeVersion = uint64(0x1000000010000000)
- // NodePageVersion is the version of the encoded node. If we ever want to make changes and want to have upgrade path,
+ merkleTreeVersion = uint64(0x1000000010000000)
+	// nodePageVersion is the version of the encoded node. If we ever want to make changes and want to have an upgrade path,
// this would give us the ability to do so.
- NodePageVersion = uint64(0x1000000010000000)
+ nodePageVersion = uint64(0x1000000010000000)
)
-// ErrRootPageDecodingFailuire is returned if the decoding the root page has failed.
-var ErrRootPageDecodingFailuire = errors.New("error encountered while decoding root page")
+// ErrRootPageDecodingFailure is returned if decoding the root page has failed.
+var ErrRootPageDecodingFailure = errors.New("error encountered while decoding root page")
// ErrMismatchingElementLength is returned when an element is being added/removed from the trie that doesn't align with the trie's previous elements' length
var ErrMismatchingElementLength = errors.New("mismatching element length")
@@ -63,7 +63,7 @@ type Trie struct {
root storedNodeIdentifier
nextNodeID storedNodeIdentifier
lastCommittedNodeID storedNodeIdentifier
- cache *merkleTrieCache
+ cache merkleTrieCache
elementLength int
}
@@ -79,7 +79,7 @@ type Stats struct {
func MakeTrie(committer Committer, memoryConfig MemoryConfig) (*Trie, error) {
mt := &Trie{
root: storedNodeIdentifierNull,
- cache: &merkleTrieCache{},
+ cache: merkleTrieCache{},
nextNodeID: storedNodeIdentifierBase,
lastCommittedNodeID: storedNodeIdentifierBase,
}
@@ -106,11 +106,9 @@ func MakeTrie(committer Committer, memoryConfig MemoryConfig) (*Trie, error) {
return mt, nil
}
-// SetCommitter set the provided committter as the current committer, and return the old one.
-func (mt *Trie) SetCommitter(committer Committer) (prevCommitter Committer) {
- prevCommitter = mt.cache.committer
+// SetCommitter sets the provided committer as the current committer
+func (mt *Trie) SetCommitter(committer Committer) {
mt.cache.committer = committer
- return
}
// RootHash returns the root hash of all the elements in the trie
@@ -154,13 +152,13 @@ func (mt *Trie) Add(d []byte) (bool, error) {
if err != nil {
return false, err
}
- found, err := pnode.find(mt.cache, d[:])
+ found, err := pnode.find(&mt.cache, d[:])
if found || (err != nil) {
return false, err
}
mt.cache.beginTransaction()
var updatedRoot storedNodeIdentifier
- updatedRoot, err = pnode.add(mt.cache, d[:], make([]byte, 0, len(d)))
+ updatedRoot, err = pnode.add(&mt.cache, d[:], make([]byte, 0, len(d)))
if err != nil {
mt.cache.rollbackTransaction()
return false, err
@@ -184,7 +182,7 @@ func (mt *Trie) Delete(d []byte) (bool, error) {
if err != nil {
return false, err
}
- found, err := pnode.find(mt.cache, d[:])
+ found, err := pnode.find(&mt.cache, d[:])
if !found || err != nil {
return false, err
}
@@ -198,7 +196,7 @@ func (mt *Trie) Delete(d []byte) (bool, error) {
return true, nil
}
var updatedRoot storedNodeIdentifier
- updatedRoot, err = pnode.remove(mt.cache, d[:], make([]byte, 0, len(d)))
+ updatedRoot, err = pnode.remove(&mt.cache, d[:], make([]byte, 0, len(d)))
if err != nil {
mt.cache.rollbackTransaction()
return false, err
@@ -218,7 +216,7 @@ func (mt *Trie) GetStats() (stats Stats, err error) {
if err != nil {
return Stats{}, err
}
- err = pnode.stats(mt.cache, &stats, 1)
+ err = pnode.stats(&mt.cache, &stats, 1)
return
}
@@ -252,7 +250,7 @@ func (mt *Trie) Evict(commit bool) (int, error) {
// serialize serializes the trie root
func (mt *Trie) serialize() []byte {
serializedBuffer := make([]byte, 5*binary.MaxVarintLen64) // allocate the worst-case scenario for the trie header.
- version := binary.PutUvarint(serializedBuffer[:], MerkleTreeVersion)
+ version := binary.PutUvarint(serializedBuffer[:], merkleTreeVersion)
root := binary.PutUvarint(serializedBuffer[version:], uint64(mt.root))
next := binary.PutUvarint(serializedBuffer[version+root:], uint64(mt.nextNodeID))
elementLength := binary.PutUvarint(serializedBuffer[version+root+next:], uint64(mt.elementLength))
@@ -260,30 +258,30 @@ func (mt *Trie) serialize() []byte {
return serializedBuffer[:version+root+next+elementLength+pageSizeLength]
}
-// serialize serializes the trie root
+// deserialize deserializes the trie root
func (mt *Trie) deserialize(bytes []byte) (int64, error) {
version, versionLen := binary.Uvarint(bytes[:])
if versionLen <= 0 {
- return 0, ErrRootPageDecodingFailuire
+ return 0, ErrRootPageDecodingFailure
}
- if version != MerkleTreeVersion {
- return 0, ErrRootPageDecodingFailuire
+ if version != merkleTreeVersion {
+ return 0, ErrRootPageDecodingFailure
}
root, rootLen := binary.Uvarint(bytes[versionLen:])
if rootLen <= 0 {
- return 0, ErrRootPageDecodingFailuire
+ return 0, ErrRootPageDecodingFailure
}
nextNodeID, nextNodeIDLen := binary.Uvarint(bytes[versionLen+rootLen:])
if nextNodeIDLen <= 0 {
- return 0, ErrRootPageDecodingFailuire
+ return 0, ErrRootPageDecodingFailure
}
elemLength, elemLengthLength := binary.Uvarint(bytes[versionLen+rootLen+nextNodeIDLen:])
if elemLengthLength <= 0 {
- return 0, ErrRootPageDecodingFailuire
+ return 0, ErrRootPageDecodingFailure
}
pageSize, pageSizeLength := binary.Uvarint(bytes[versionLen+rootLen+nextNodeIDLen+elemLengthLength:])
if pageSizeLength <= 0 {
- return 0, ErrRootPageDecodingFailuire
+ return 0, ErrRootPageDecodingFailure
}
mt.root = storedNodeIdentifier(root)
mt.nextNodeID = storedNodeIdentifier(nextNodeID)
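
The other mechanical change in this file, turning Trie.cache from *merkleTrieCache into an embedded value, is why every find/add/remove call site now passes &mt.cache: the cache lives inside the Trie's single allocation, and mutation goes through an explicit pointer. The same refactor in miniature, with illustrative types:

package main

import "fmt"

type cache struct{ hits int }

func (c *cache) bump() { c.hits++ }

// Before: trie held a *cache, costing a separate heap allocation.
// After: embedding by value keeps trie and cache in one allocation, and
// call sites that mutate it pass &t.c explicitly.
type trie struct{ c cache }

func touch(c *cache) { c.bump() }

func main() {
	t := &trie{} // one allocation covers both the trie and its cache
	touch(&t.c)
	fmt.Println(t.c.hits) // 1
}
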
diff --git a/crypto/stateproof/msgp_gen.go b/crypto/stateproof/msgp_gen.go
index 93e383c42..8d4d8a350 100644
--- a/crypto/stateproof/msgp_gen.go
+++ b/crypto/stateproof/msgp_gen.go
@@ -6,6 +6,9 @@ import (
"sort"
"github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/crypto/merklearray"
+ "github.com/algorand/go-algorand/data/basics"
)
// The following msgp objects are implemented in this file:
@@ -17,6 +20,22 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// Prover
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// ProverPersistedFields
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// Reveal
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -82,6 +101,664 @@ func (z *MessageHash) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *Prover) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0004Len := uint32(7)
+ var zb0004Mask uint16 /* 11 bits */
+ if (*z).ProverPersistedFields.Data == (MessageHash{}) {
+ zb0004Len--
+ zb0004Mask |= 0x4
+ }
+ if (*z).ProverPersistedFields.LnProvenWeight == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x8
+ }
+ if len((*z).ProverPersistedFields.Participants) == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x10
+ }
+ if (*z).ProverPersistedFields.Parttree == nil {
+ zb0004Len--
+ zb0004Mask |= 0x20
+ }
+ if (*z).ProverPersistedFields.ProvenWeight == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x40
+ }
+ if (*z).ProverPersistedFields.Round == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x80
+ }
+ if (*z).ProverPersistedFields.StrengthTarget == 0 {
+ zb0004Len--
+ zb0004Mask |= 0x400
+ }
+ // variable map header, size zb0004Len
+ o = append(o, 0x80|uint8(zb0004Len))
+ if zb0004Len != 0 {
+ if (zb0004Mask & 0x4) == 0 { // if not empty
+ // string "data"
+ o = append(o, 0xa4, 0x64, 0x61, 0x74, 0x61)
+ o = msgp.AppendBytes(o, ((*z).ProverPersistedFields.Data)[:])
+ }
+ if (zb0004Mask & 0x8) == 0 { // if not empty
+ // string "lnprv"
+ o = append(o, 0xa5, 0x6c, 0x6e, 0x70, 0x72, 0x76)
+ o = msgp.AppendUint64(o, (*z).ProverPersistedFields.LnProvenWeight)
+ }
+ if (zb0004Mask & 0x10) == 0 { // if not empty
+ // string "parts"
+ o = append(o, 0xa5, 0x70, 0x61, 0x72, 0x74, 0x73)
+ if (*z).ProverPersistedFields.Participants == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).ProverPersistedFields.Participants)))
+ }
+ for zb0002 := range (*z).ProverPersistedFields.Participants {
+ o = (*z).ProverPersistedFields.Participants[zb0002].MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x20) == 0 { // if not empty
+ // string "parttree"
+ o = append(o, 0xa8, 0x70, 0x61, 0x72, 0x74, 0x74, 0x72, 0x65, 0x65)
+ if (*z).ProverPersistedFields.Parttree == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = (*z).ProverPersistedFields.Parttree.MarshalMsg(o)
+ }
+ }
+ if (zb0004Mask & 0x40) == 0 { // if not empty
+ // string "prv"
+ o = append(o, 0xa3, 0x70, 0x72, 0x76)
+ o = msgp.AppendUint64(o, (*z).ProverPersistedFields.ProvenWeight)
+ }
+ if (zb0004Mask & 0x80) == 0 { // if not empty
+ // string "rnd"
+ o = append(o, 0xa3, 0x72, 0x6e, 0x64)
+ o = msgp.AppendUint64(o, (*z).ProverPersistedFields.Round)
+ }
+ if (zb0004Mask & 0x400) == 0 { // if not empty
+ // string "str"
+ o = append(o, 0xa3, 0x73, 0x74, 0x72)
+ o = msgp.AppendUint64(o, (*z).ProverPersistedFields.StrengthTarget)
+ }
+ }
+ return
+}
+
+func (_ *Prover) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*Prover)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *Prover) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 > 0 {
+ zb0004--
+ bts, err = msgp.ReadExactBytes(bts, ((*z).ProverPersistedFields.Data)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Data")
+ return
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
+ (*z).ProverPersistedFields.Round, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Round")
+ return
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Participants")
+ return
+ }
+ if zb0006 > VotersAllocBound {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(VotersAllocBound))
+ err = msgp.WrapError(err, "struct-from-array", "Participants")
+ return
+ }
+ if zb0007 {
+ (*z).ProverPersistedFields.Participants = nil
+ } else if (*z).ProverPersistedFields.Participants != nil && cap((*z).ProverPersistedFields.Participants) >= zb0006 {
+ (*z).ProverPersistedFields.Participants = ((*z).ProverPersistedFields.Participants)[:zb0006]
+ } else {
+ (*z).ProverPersistedFields.Participants = make([]basics.Participant, zb0006)
+ }
+ for zb0002 := range (*z).ProverPersistedFields.Participants {
+ bts, err = (*z).ProverPersistedFields.Participants[zb0002].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Participants", zb0002)
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).ProverPersistedFields.Parttree = nil
+ } else {
+ if (*z).ProverPersistedFields.Parttree == nil {
+ (*z).ProverPersistedFields.Parttree = new(merklearray.Tree)
+ }
+ bts, err = (*z).ProverPersistedFields.Parttree.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Parttree")
+ return
+ }
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
+ (*z).ProverPersistedFields.LnProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "LnProvenWeight")
+ return
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
+ (*z).ProverPersistedFields.ProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProvenWeight")
+ return
+ }
+ }
+ if zb0004 > 0 {
+ zb0004--
+ (*z).ProverPersistedFields.StrengthTarget, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StrengthTarget")
+ return
+ }
+ }
+ if zb0004 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0004)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0005 {
+ (*z) = Prover{}
+ }
+ for zb0004 > 0 {
+ zb0004--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "data":
+ bts, err = msgp.ReadExactBytes(bts, ((*z).ProverPersistedFields.Data)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "Data")
+ return
+ }
+ case "rnd":
+ (*z).ProverPersistedFields.Round, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Round")
+ return
+ }
+ case "parts":
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Participants")
+ return
+ }
+ if zb0008 > VotersAllocBound {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(VotersAllocBound))
+ err = msgp.WrapError(err, "Participants")
+ return
+ }
+ if zb0009 {
+ (*z).ProverPersistedFields.Participants = nil
+ } else if (*z).ProverPersistedFields.Participants != nil && cap((*z).ProverPersistedFields.Participants) >= zb0008 {
+ (*z).ProverPersistedFields.Participants = ((*z).ProverPersistedFields.Participants)[:zb0008]
+ } else {
+ (*z).ProverPersistedFields.Participants = make([]basics.Participant, zb0008)
+ }
+ for zb0002 := range (*z).ProverPersistedFields.Participants {
+ bts, err = (*z).ProverPersistedFields.Participants[zb0002].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Participants", zb0002)
+ return
+ }
+ }
+ case "parttree":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).ProverPersistedFields.Parttree = nil
+ } else {
+ if (*z).ProverPersistedFields.Parttree == nil {
+ (*z).ProverPersistedFields.Parttree = new(merklearray.Tree)
+ }
+ bts, err = (*z).ProverPersistedFields.Parttree.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Parttree")
+ return
+ }
+ }
+ case "lnprv":
+ (*z).ProverPersistedFields.LnProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LnProvenWeight")
+ return
+ }
+ case "prv":
+ (*z).ProverPersistedFields.ProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProvenWeight")
+ return
+ }
+ case "str":
+ (*z).ProverPersistedFields.StrengthTarget, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StrengthTarget")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *Prover) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*Prover)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *Prover) Msgsize() (s int) {
+ s = 1 + 5 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 4 + msgp.Uint64Size + 6 + msgp.ArrayHeaderSize
+ for zb0002 := range (*z).ProverPersistedFields.Participants {
+ s += (*z).ProverPersistedFields.Participants[zb0002].Msgsize()
+ }
+ s += 9
+ if (*z).ProverPersistedFields.Parttree == nil {
+ s += msgp.NilSize
+ } else {
+ s += (*z).ProverPersistedFields.Parttree.Msgsize()
+ }
+ s += 6 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *Prover) MsgIsZero() bool {
+ return ((*z).ProverPersistedFields.Data == (MessageHash{})) && ((*z).ProverPersistedFields.Round == 0) && (len((*z).ProverPersistedFields.Participants) == 0) && ((*z).ProverPersistedFields.Parttree == nil) && ((*z).ProverPersistedFields.LnProvenWeight == 0) && ((*z).ProverPersistedFields.ProvenWeight == 0) && ((*z).ProverPersistedFields.StrengthTarget == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *ProverPersistedFields) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0003Len := uint32(7)
+ var zb0003Mask uint8 /* 8 bits */
+ if (*z).Data == (MessageHash{}) {
+ zb0003Len--
+ zb0003Mask |= 0x2
+ }
+ if (*z).LnProvenWeight == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x4
+ }
+ if len((*z).Participants) == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x8
+ }
+ if (*z).Parttree == nil {
+ zb0003Len--
+ zb0003Mask |= 0x10
+ }
+ if (*z).ProvenWeight == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x20
+ }
+ if (*z).Round == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x40
+ }
+ if (*z).StrengthTarget == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x80
+ }
+ // variable map header, size zb0003Len
+ o = append(o, 0x80|uint8(zb0003Len))
+ if zb0003Len != 0 {
+ if (zb0003Mask & 0x2) == 0 { // if not empty
+ // string "data"
+ o = append(o, 0xa4, 0x64, 0x61, 0x74, 0x61)
+ o = msgp.AppendBytes(o, ((*z).Data)[:])
+ }
+ if (zb0003Mask & 0x4) == 0 { // if not empty
+ // string "lnprv"
+ o = append(o, 0xa5, 0x6c, 0x6e, 0x70, 0x72, 0x76)
+ o = msgp.AppendUint64(o, (*z).LnProvenWeight)
+ }
+ if (zb0003Mask & 0x8) == 0 { // if not empty
+ // string "parts"
+ o = append(o, 0xa5, 0x70, 0x61, 0x72, 0x74, 0x73)
+ if (*z).Participants == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Participants)))
+ }
+ for zb0002 := range (*z).Participants {
+ o = (*z).Participants[zb0002].MarshalMsg(o)
+ }
+ }
+ if (zb0003Mask & 0x10) == 0 { // if not empty
+ // string "parttree"
+ o = append(o, 0xa8, 0x70, 0x61, 0x72, 0x74, 0x74, 0x72, 0x65, 0x65)
+ if (*z).Parttree == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = (*z).Parttree.MarshalMsg(o)
+ }
+ }
+ if (zb0003Mask & 0x20) == 0 { // if not empty
+ // string "prv"
+ o = append(o, 0xa3, 0x70, 0x72, 0x76)
+ o = msgp.AppendUint64(o, (*z).ProvenWeight)
+ }
+ if (zb0003Mask & 0x40) == 0 { // if not empty
+ // string "rnd"
+ o = append(o, 0xa3, 0x72, 0x6e, 0x64)
+ o = msgp.AppendUint64(o, (*z).Round)
+ }
+ if (zb0003Mask & 0x80) == 0 { // if not empty
+ // string "str"
+ o = append(o, 0xa3, 0x73, 0x74, 0x72)
+ o = msgp.AppendUint64(o, (*z).StrengthTarget)
+ }
+ }
+ return
+}
+
+func (_ *ProverPersistedFields) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ProverPersistedFields)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *ProverPersistedFields) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = msgp.ReadExactBytes(bts, ((*z).Data)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Data")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Round, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Round")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Participants")
+ return
+ }
+ if zb0005 > VotersAllocBound {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(VotersAllocBound))
+ err = msgp.WrapError(err, "struct-from-array", "Participants")
+ return
+ }
+ if zb0006 {
+ (*z).Participants = nil
+ } else if (*z).Participants != nil && cap((*z).Participants) >= zb0005 {
+ (*z).Participants = ((*z).Participants)[:zb0005]
+ } else {
+ (*z).Participants = make([]basics.Participant, zb0005)
+ }
+ for zb0002 := range (*z).Participants {
+ bts, err = (*z).Participants[zb0002].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Participants", zb0002)
+ return
+ }
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Parttree = nil
+ } else {
+ if (*z).Parttree == nil {
+ (*z).Parttree = new(merklearray.Tree)
+ }
+ bts, err = (*z).Parttree.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Parttree")
+ return
+ }
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).LnProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "LnProvenWeight")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).ProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ProvenWeight")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).StrengthTarget, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StrengthTarget")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = ProverPersistedFields{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "data":
+ bts, err = msgp.ReadExactBytes(bts, ((*z).Data)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "Data")
+ return
+ }
+ case "rnd":
+ (*z).Round, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Round")
+ return
+ }
+ case "parts":
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Participants")
+ return
+ }
+ if zb0007 > VotersAllocBound {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(VotersAllocBound))
+ err = msgp.WrapError(err, "Participants")
+ return
+ }
+ if zb0008 {
+ (*z).Participants = nil
+ } else if (*z).Participants != nil && cap((*z).Participants) >= zb0007 {
+ (*z).Participants = ((*z).Participants)[:zb0007]
+ } else {
+ (*z).Participants = make([]basics.Participant, zb0007)
+ }
+ for zb0002 := range (*z).Participants {
+ bts, err = (*z).Participants[zb0002].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Participants", zb0002)
+ return
+ }
+ }
+ case "parttree":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Parttree = nil
+ } else {
+ if (*z).Parttree == nil {
+ (*z).Parttree = new(merklearray.Tree)
+ }
+ bts, err = (*z).Parttree.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Parttree")
+ return
+ }
+ }
+ case "lnprv":
+ (*z).LnProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LnProvenWeight")
+ return
+ }
+ case "prv":
+ (*z).ProvenWeight, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ProvenWeight")
+ return
+ }
+ case "str":
+ (*z).StrengthTarget, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StrengthTarget")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *ProverPersistedFields) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ProverPersistedFields)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *ProverPersistedFields) Msgsize() (s int) {
+ s = 1 + 5 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 4 + msgp.Uint64Size + 6 + msgp.ArrayHeaderSize
+ for zb0002 := range (*z).Participants {
+ s += (*z).Participants[zb0002].Msgsize()
+ }
+ s += 9
+ if (*z).Parttree == nil {
+ s += msgp.NilSize
+ } else {
+ s += (*z).Parttree.Msgsize()
+ }
+ s += 6 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.Uint64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *ProverPersistedFields) MsgIsZero() bool {
+ return ((*z).Data == (MessageHash{})) && ((*z).Round == 0) && (len((*z).Participants) == 0) && ((*z).Parttree == nil) && ((*z).LnProvenWeight == 0) && ((*z).ProvenWeight == 0) && ((*z).StrengthTarget == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
func (z *Reveal) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
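
All the generated marshalers above share one omitempty scheme: start from the full field count, and for each zero-valued field set a bit in a mask and decrement the count; then write a map header of the reduced size and emit only the fields whose bit is still clear. A hand-written miniature of that scheme (plain Go, no msgp dependency, with a map standing in for the msgpack byte stream):

package main

import "fmt"

type point struct{ X, Y uint64 }

// marshal emits only non-zero fields, mirroring the generated mask logic.
func marshal(p point) map[string]uint64 {
	fieldLen := uint32(2)
	var mask uint8
	if p.X == 0 {
		fieldLen--
		mask |= 0x1
	}
	if p.Y == 0 {
		fieldLen--
		mask |= 0x2
	}
	out := make(map[string]uint64, fieldLen)
	if fieldLen != 0 {
		if (mask & 0x1) == 0 { // if not empty
			out["x"] = p.X
		}
		if (mask & 0x2) == 0 { // if not empty
			out["y"] = p.Y
		}
	}
	return out
}

func main() {
	fmt.Println(marshal(point{X: 5})) // map[x:5]
}
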
diff --git a/crypto/stateproof/msgp_gen_test.go b/crypto/stateproof/msgp_gen_test.go
index 30dd0b1d5..2812908b3 100644
--- a/crypto/stateproof/msgp_gen_test.go
+++ b/crypto/stateproof/msgp_gen_test.go
@@ -74,6 +74,126 @@ func BenchmarkUnmarshalMessageHash(b *testing.B) {
}
}
+func TestMarshalUnmarshalProver(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := Prover{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingProver(t *testing.T) {
+ protocol.RunEncodingTest(t, &Prover{})
+}
+
+func BenchmarkMarshalMsgProver(b *testing.B) {
+ v := Prover{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgProver(b *testing.B) {
+ v := Prover{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalProver(b *testing.B) {
+ v := Prover{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalProverPersistedFields(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := ProverPersistedFields{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingProverPersistedFields(t *testing.T) {
+ protocol.RunEncodingTest(t, &ProverPersistedFields{})
+}
+
+func BenchmarkMarshalMsgProverPersistedFields(b *testing.B) {
+ v := ProverPersistedFields{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgProverPersistedFields(b *testing.B) {
+ v := ProverPersistedFields{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalProverPersistedFields(b *testing.B) {
+ v := ProverPersistedFields{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalReveal(t *testing.T) {
partitiontest.PartitionTest(t)
v := Reveal{}
diff --git a/crypto/stateproof/builder.go b/crypto/stateproof/prover.go
index 0e86fa894..64484ce35 100644
--- a/crypto/stateproof/builder.go
+++ b/crypto/stateproof/prover.go
@@ -26,7 +26,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
)
-// Errors for the StateProof builder
+// Errors for the StateProof prover
var (
ErrPositionOutOfBound = errors.New("requested position is out of bounds")
ErrPositionAlreadyPresent = errors.New("requested position is already present")
@@ -34,48 +34,60 @@ var (
ErrCoinIndexError = errors.New("could not find corresponding index for a given coin")
)
-// Builder keeps track of signatures on a message and eventually produces
+// VotersAllocBound should be equal to config.Consensus[protocol.ConsensusCurrentVersion].StateProofTopVoters
+const VotersAllocBound = 1024
+
+// ProverPersistedFields is the set of fields from the crypto state proof prover that are persisted to disk.
+type ProverPersistedFields struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+ Data MessageHash `codec:"data"`
+ Round uint64 `codec:"rnd"`
+ Participants []basics.Participant `codec:"parts,allocbound=VotersAllocBound"`
+ Parttree *merklearray.Tree `codec:"parttree"`
+ LnProvenWeight uint64 `codec:"lnprv"`
+ ProvenWeight uint64 `codec:"prv"`
+ StrengthTarget uint64 `codec:"str"`
+}
+
+// Prover keeps track of signatures on a message and eventually produces
// a state proof for that message.
-type Builder struct {
- data MessageHash
- round uint64
- sigs []sigslot // Indexed by pos in participants
- signedWeight uint64 // Total weight of signatures so far
- participants []basics.Participant
- parttree *merklearray.Tree
- lnProvenWeight uint64
- provenWeight uint64
- strengthTarget uint64
- cachedProof *StateProof
+type Prover struct {
+ ProverPersistedFields
+ sigs []sigslot // Indexed by pos in Participants
+ signedWeight uint64 // Total weight of signatures so far
+ cachedProof *StateProof
}
-// MakeBuilder constructs an empty builder. After adding enough signatures and signed weight, this builder is used to create a stateproof.
-func MakeBuilder(data MessageHash, round uint64, provenWeight uint64, part []basics.Participant, parttree *merklearray.Tree, strengthTarget uint64) (*Builder, error) {
+// MakeProver constructs an empty prover. After adding enough signatures and signed weight, the prover is used to create a state proof.
+func MakeProver(data MessageHash, round uint64, provenWeight uint64, part []basics.Participant, parttree *merklearray.Tree, strengthTarget uint64) (*Prover, error) {
npart := len(part)
lnProvenWt, err := LnIntApproximation(provenWeight)
if err != nil {
return nil, err
}
- b := &Builder{
- data: data,
- round: round,
- sigs: make([]sigslot, npart),
- signedWeight: 0,
- participants: part,
- parttree: parttree,
- lnProvenWeight: lnProvenWt,
- provenWeight: provenWeight,
- strengthTarget: strengthTarget,
- cachedProof: nil,
+ b := &Prover{
+ ProverPersistedFields: ProverPersistedFields{
+ Data: data,
+ Round: round,
+ Participants: part,
+ Parttree: parttree,
+ LnProvenWeight: lnProvenWt,
+ ProvenWeight: provenWeight,
+ StrengthTarget: strengthTarget,
+ },
+
+ sigs: make([]sigslot, npart),
+ signedWeight: 0,
+ cachedProof: nil,
}
return b, nil
}
-// Present checks if the builder already contains a signature at a particular
+// Present checks if the prover already contains a signature at a particular
// offset.
-func (b *Builder) Present(pos uint64) (bool, error) {
+func (b *Prover) Present(pos uint64) (bool, error) {
if pos >= uint64(len(b.sigs)) {
return false, fmt.Errorf("%w pos %d >= len(b.sigs) %d", ErrPositionOutOfBound, pos, len(b.sigs))
}
@@ -83,17 +95,17 @@ func (b *Builder) Present(pos uint64) (bool, error) {
return b.sigs[pos].Weight != 0, nil
}
-// IsValid verifies that the participant along with the signature can be inserted to the builder.
+// IsValid verifies that the participant along with the signature can be inserted into the prover.
// verifySig can be set to false when the signature is already verified (e.g. loaded from the DB)
-func (b *Builder) IsValid(pos uint64, sig *merklesignature.Signature, verifySig bool) error {
- if pos >= uint64(len(b.participants)) {
- return fmt.Errorf("%w pos %d >= len(participants) %d", ErrPositionOutOfBound, pos, len(b.participants))
+func (b *Prover) IsValid(pos uint64, sig *merklesignature.Signature, verifySig bool) error {
+ if pos >= uint64(len(b.Participants)) {
+ return fmt.Errorf("%w pos %d >= len(participants) %d", ErrPositionOutOfBound, pos, len(b.Participants))
}
- p := b.participants[pos]
+ p := b.Participants[pos]
if p.Weight == 0 {
- return fmt.Errorf("builder.IsValid: %w: position = %d", ErrPositionWithZeroWeight, pos)
+ return fmt.Errorf("prover.IsValid: %w: position = %d", ErrPositionWithZeroWeight, pos)
}
// Check signature
@@ -101,7 +113,7 @@ func (b *Builder) IsValid(pos uint64, sig *merklesignature.Signature, verifySig
if err := sig.ValidateSaltVersion(merklesignature.SchemeSaltVersion); err != nil {
return err
}
- if err := p.PK.VerifyBytes(b.round, b.data[:], sig); err != nil {
+ if err := p.PK.VerifyBytes(b.Round, b.Data[:], sig); err != nil {
return err
}
}
@@ -109,7 +121,7 @@ func (b *Builder) IsValid(pos uint64, sig *merklesignature.Signature, verifySig
}
// Add a signature to the set of signatures available for building a proof.
-func (b *Builder) Add(pos uint64, sig merklesignature.Signature) error {
+func (b *Prover) Add(pos uint64, sig merklesignature.Signature) error {
isPresent, err := b.Present(pos)
if err != nil {
return err
@@ -118,7 +130,7 @@ func (b *Builder) Add(pos uint64, sig merklesignature.Signature) error {
return ErrPositionAlreadyPresent
}
- p := b.participants[pos]
+ p := b.Participants[pos]
// Remember the signature
b.sigs[pos].Weight = p.Weight
@@ -129,12 +141,12 @@ func (b *Builder) Add(pos uint64, sig merklesignature.Signature) error {
}
// Ready returns whether the state proof is ready to be built.
-func (b *Builder) Ready() bool {
- return b.cachedProof != nil || b.signedWeight > b.provenWeight
+func (b *Prover) Ready() bool {
+ return b.cachedProof != nil || b.signedWeight > b.ProvenWeight
}
// SignedWeight returns the total weight of signatures added so far.
-func (b *Builder) SignedWeight() uint64 {
+func (b *Prover) SignedWeight() uint64 {
return b.signedWeight
}
@@ -144,7 +156,7 @@ func (b *Builder) SignedWeight() uint64 {
// coinWeight.
//
// coinIndex works by doing a binary search on the sigs array.
-func (b *Builder) coinIndex(coinWeight uint64) (uint64, error) {
+func (b *Prover) coinIndex(coinWeight uint64) (uint64, error) {
lo := uint64(0)
hi := uint64(len(b.sigs))
@@ -167,15 +179,15 @@ again:
goto again
}
-// Build returns a state proof, if the builder has accumulated
+// CreateProof returns a state proof if the prover has accumulated
// enough signatures to construct it.
-func (b *Builder) Build() (*StateProof, error) {
+func (b *Prover) CreateProof() (*StateProof, error) {
if b.cachedProof != nil {
return b.cachedProof, nil
}
if !b.Ready() {
- return nil, fmt.Errorf("%w: %d <= %d", ErrSignedWeightLessThanProvenWeight, b.signedWeight, b.provenWeight)
+ return nil, fmt.Errorf("%w: %d <= %d", ErrSignedWeightLessThanProvenWeight, b.signedWeight, b.ProvenWeight)
}
// Commit to the sigs array
@@ -197,17 +209,17 @@ func (b *Builder) Build() (*StateProof, error) {
MerkleSignatureSaltVersion: merklesignature.SchemeSaltVersion,
}
- nr, err := numReveals(b.signedWeight, b.lnProvenWeight, b.strengthTarget)
+ nr, err := numReveals(b.signedWeight, b.LnProvenWeight, b.StrengthTarget)
if err != nil {
return nil, err
}
choice := coinChoiceSeed{
- partCommitment: b.parttree.Root(),
- lnProvenWeight: b.lnProvenWeight,
+ partCommitment: b.Parttree.Root(),
+ lnProvenWeight: b.LnProvenWeight,
sigCommitment: s.SigCommit,
signedWeight: s.SignedWeight,
- data: b.data,
+ data: b.Data,
}
coinHash := makeCoinGenerator(&choice)
@@ -221,8 +233,8 @@ func (b *Builder) Build() (*StateProof, error) {
return nil, err
}
- if pos >= uint64(len(b.participants)) {
- return nil, fmt.Errorf("%w pos %d >= len(participants) %d", ErrPositionOutOfBound, pos, len(b.participants))
+ if pos >= uint64(len(b.Participants)) {
+ return nil, fmt.Errorf("%w pos %d >= len(participants) %d", ErrPositionOutOfBound, pos, len(b.Participants))
}
revealsSequence[j] = pos
@@ -236,7 +248,7 @@ func (b *Builder) Build() (*StateProof, error) {
// Generate the reveal for pos
s.Reveals[pos] = Reveal{
SigSlot: b.sigs[pos].sigslotCommit,
- Part: b.participants[pos],
+ Part: b.Participants[pos],
}
proofPositions = append(proofPositions, pos)
@@ -247,7 +259,7 @@ func (b *Builder) Build() (*StateProof, error) {
return nil, err
}
- partProofs, err := b.parttree.Prove(proofPositions)
+ partProofs, err := b.Parttree.Prove(proofPositions)
if err != nil {
return nil, err
}
@@ -258,3 +270,8 @@ func (b *Builder) Build() (*StateProof, error) {
b.cachedProof = s
return s, nil
}
+
+// AllocSigs should only be used after decoding a msgpacked Prover, since the sigs field is not exported and therefore not encoded
+func (b *Prover) AllocSigs() {
+ b.sigs = make([]sigslot, len(b.Participants))
+}
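
Since sigs and cachedProof are unexported, a Prover decoded from msgpack comes back with only its ProverPersistedFields populated; AllocSigs restores the signature slots so positions can be re-added. A sketch of the intended restore sequence, written as if it lived in package stateproof (loadedSigs stands in for whatever store kept the verified signatures; it is not a real API):

// restoreProver sketches the sequence after loading a persisted prover.
func restoreProver(persisted []byte, loadedSigs map[uint64]merklesignature.Signature) (*Prover, error) {
	var b Prover
	if _, err := b.UnmarshalMsg(persisted); err != nil {
		return nil, err
	}
	// sigs is not serialized, so it must be re-allocated before any Add.
	b.AllocSigs()
	for pos, sig := range loadedSigs {
		// Signatures were verified before persisting, so re-adding them
		// through Add (which only checks for presence) suffices here.
		if err := b.Add(pos, sig); err != nil {
			return nil, err
		}
	}
	return &b, nil
}
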
diff --git a/crypto/stateproof/builder_test.go b/crypto/stateproof/prover_test.go
index 6938a4b17..6d374e059 100644
--- a/crypto/stateproof/builder_test.go
+++ b/crypto/stateproof/prover_test.go
@@ -28,6 +28,7 @@ import (
"github.com/algorand/falcon"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklearray"
"github.com/algorand/go-algorand/crypto/merklesignature"
@@ -50,7 +51,7 @@ type paramsForTest struct {
partCommitment crypto.GenericDigest
numberOfParticipnets uint64
data MessageHash
- builder *Builder
+ builder *Prover
sig merklesignature.Signature
}
@@ -121,7 +122,7 @@ func generateProofForTesting(a *require.Assertions, doLargeTest bool) paramsForT
partcom, err := merklearray.BuildVectorCommitmentTree(basics.ParticipantsArray(parts), crypto.HashFactory{HashType: HashType})
a.NoError(err)
- b, err := MakeBuilder(data, stateProofIntervalForTests, uint64(totalWeight/2), parts, partcom, stateProofStrengthTargetForTests)
+ b, err := MakeProver(data, stateProofIntervalForTests, uint64(totalWeight/2), parts, partcom, stateProofStrengthTargetForTests)
a.NoError(err)
for i := uint64(0); i < uint64(npart)/2+10; i++ { // leave some signature to be added later in the test (if needed)
@@ -135,7 +136,7 @@ func generateProofForTesting(a *require.Assertions, doLargeTest bool) paramsForT
a.True(isPresent)
}
- proof, err := b.Build()
+ proof, err := b.CreateProof()
a.NoError(err)
p := paramsForTest{
@@ -284,7 +285,7 @@ func TestSignatureCommitmentBinaryFormat(t *testing.T) {
partcom, err := merklearray.BuildVectorCommitmentTree(basics.ParticipantsArray(parts), crypto.HashFactory{HashType: HashType})
a.NoError(err)
- b, err := MakeBuilder(data, stateProofIntervalForTests, uint64(totalWeight/(2*numPart)), parts, partcom, stateProofStrengthTargetForTests)
+ b, err := MakeProver(data, stateProofIntervalForTests, uint64(totalWeight/(2*numPart)), parts, partcom, stateProofStrengthTargetForTests)
a.NoError(err)
for i := 0; i < numPart; i++ {
@@ -293,7 +294,7 @@ func TestSignatureCommitmentBinaryFormat(t *testing.T) {
b.Add(uint64(i), sigs[i])
}
- sProof, err := b.Build()
+ sProof, err := b.CreateProof()
a.NoError(err)
leaf0 := calculateHashOnSigLeaf(t, sigs[0], findLInProof(a, sigs[0], sProof))
@@ -465,7 +466,7 @@ func TestBuilder_AddRejectsInvalidSigVersion(t *testing.T) {
partcom, err := merklearray.BuildVectorCommitmentTree(basics.ParticipantsArray(parts), crypto.HashFactory{HashType: HashType})
a.NoError(err)
- builder, err := MakeBuilder(data, stateProofIntervalForTests, uint64(totalWeight/2), parts, partcom, stateProofStrengthTargetForTests)
+ builder, err := MakeProver(data, stateProofIntervalForTests, uint64(totalWeight/2), parts, partcom, stateProofStrengthTargetForTests)
a.NoError(err)
// actual test:
@@ -489,21 +490,21 @@ func TestBuildAndReady(t *testing.T) {
partcom, err := merklearray.BuildVectorCommitmentTree(basics.ParticipantsArray(parts), crypto.HashFactory{HashType: HashType})
a.NoError(err)
- builder, err := MakeBuilder(data, stateProofIntervalForTests, uint64(totalWeight/2), parts, partcom, stateProofStrengthTargetForTests)
+ builder, err := MakeProver(data, stateProofIntervalForTests, uint64(totalWeight/2), parts, partcom, stateProofStrengthTargetForTests)
a.NoError(err)
a.False(builder.Ready())
- _, err = builder.Build()
+ _, err = builder.CreateProof()
a.ErrorIs(err, ErrSignedWeightLessThanProvenWeight)
- builder.signedWeight = builder.provenWeight
+ builder.signedWeight = builder.ProvenWeight
a.False(builder.Ready())
- _, err = builder.Build()
+ _, err = builder.CreateProof()
a.ErrorIs(err, ErrSignedWeightLessThanProvenWeight)
- builder.signedWeight = builder.provenWeight + 1
+ builder.signedWeight = builder.ProvenWeight + 1
a.True(builder.Ready())
- _, err = builder.Build()
+ _, err = builder.CreateProof()
a.NotErrorIs(err, ErrSignedWeightLessThanProvenWeight)
}
@@ -512,11 +513,11 @@ func TestErrorCases(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- builder := Builder{}
+ builder := Prover{}
_, err := builder.Present(1)
a.ErrorIs(err, ErrPositionOutOfBound)
- builder.participants = make([]basics.Participant, 1, 1)
+ builder.Participants = make([]basics.Participant, 1, 1)
builder.sigs = make([]sigslot, 1, 1)
err = builder.IsValid(1, &merklesignature.Signature{}, false)
a.ErrorIs(err, ErrPositionOutOfBound)
@@ -524,11 +525,11 @@ func TestErrorCases(t *testing.T) {
err = builder.IsValid(0, &merklesignature.Signature{}, false)
require.ErrorIs(t, err, ErrPositionWithZeroWeight)
- builder.participants[0].Weight = 1
+ builder.Participants[0].Weight = 1
err = builder.IsValid(0, &merklesignature.Signature{}, true)
a.ErrorIs(err, merklesignature.ErrKeyLifetimeIsZero)
- builder.participants[0].PK.KeyLifetime = 20
+ builder.Participants[0].PK.KeyLifetime = 20
err = builder.IsValid(0, &merklesignature.Signature{}, true)
a.ErrorIs(err, merklesignature.ErrSignatureSchemeVerificationFailed)
@@ -541,7 +542,7 @@ func TestErrorCases(t *testing.T) {
}
func checkSigsArray(n int, a *require.Assertions) {
- b := &Builder{
+ b := &Prover{
sigs: make([]sigslot, n),
}
for i := 0; i < n; i++ {
@@ -577,7 +578,7 @@ func TestCoinIndexBetweenWeights(t *testing.T) {
a := require.New(t)
n := 1000
- b := &Builder{
+ b := &Prover{
sigs: make([]sigslot, n),
}
for i := 0; i < n; i++ {
@@ -604,7 +605,7 @@ func TestBuilderWithZeroProvenWeight(t *testing.T) {
data := testMessage("hello world").IntoStateProofMessageHash()
- _, err := MakeBuilder(data, stateProofIntervalForTests, 0, nil, nil, stateProofStrengthTargetForTests)
+ _, err := MakeProver(data, stateProofIntervalForTests, 0, nil, nil, stateProofStrengthTargetForTests)
a.ErrorIs(err, ErrIllegalInputForLnApprox)
}
@@ -614,23 +615,30 @@ func TestBuilder_BuildStateProofCache(t *testing.T) {
a := require.New(t)
p := generateProofForTesting(a, true)
sp1 := &p.sp
- sp2, err := p.builder.Build()
+ sp2, err := p.builder.CreateProof()
a.NoError(err)
a.Equal(sp1, sp2) // already built, no signatures added
err = p.builder.Add(p.numberOfParticipnets-1, p.sig)
a.NoError(err)
- sp3, err := p.builder.Build()
+ sp3, err := p.builder.CreateProof()
a.NoError(err)
a.NotEqual(sp1, sp3) // better StateProof with added signature should have been built
- sp4, err := p.builder.Build()
+ sp4, err := p.builder.CreateProof()
a.NoError(err)
a.Equal(sp3, sp4)
return
}
+// Verifies that the VotersAllocBound constant matches the value in the current consensus parameters.
+// It is used as a msgpack allocbound, which needs to be static.
+func TestBuilder_StateProofTopVoters(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ require.Equal(t, config.Consensus[protocol.ConsensusCurrentVersion].StateProofTopVoters, uint64(VotersAllocBound))
+}
+
func BenchmarkBuildVerify(b *testing.B) {
totalWeight := 1000000
npart := 1000
@@ -667,7 +675,7 @@ func BenchmarkBuildVerify(b *testing.B) {
b.Run("AddBuild", func(b *testing.B) {
for i := 0; i < b.N; i++ {
- builder, err := MakeBuilder(data, stateProofIntervalForTests, provenWeight, parts, partcom, stateProofStrengthTargetForTests)
+ builder, err := MakeProver(data, stateProofIntervalForTests, provenWeight, parts, partcom, stateProofStrengthTargetForTests)
if err != nil {
b.Error(err)
}
@@ -678,7 +686,7 @@ func BenchmarkBuildVerify(b *testing.B) {
builder.Add(uint64(i), sigs[i])
}
- sp, err = builder.Build()
+ sp, err = builder.CreateProof()
if err != nil {
b.Error(err)
}
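
Read together, the renamed API gives the prover lifecycle these tests walk through: MakeProver, Add until Ready reports enough signed weight, then CreateProof, whose result is cached until more signatures arrive. A condensed sketch, as if in package stateproof, with participants, commitment tree, and signatures assumed prepared as in generateProofForTesting above:

// buildProof is a minimal sketch of the renamed flow; the test-only
// interval/strength constants are reused from this file.
func buildProof(data MessageHash, parts []basics.Participant,
	partcom *merklearray.Tree, sigs []merklesignature.Signature,
	provenWeight uint64) (*StateProof, error) {

	b, err := MakeProver(data, stateProofIntervalForTests, provenWeight,
		parts, partcom, stateProofStrengthTargetForTests)
	if err != nil {
		return nil, err
	}
	for i := range sigs {
		if err := b.Add(uint64(i), sigs[i]); err != nil {
			return nil, err
		}
	}
	if !b.Ready() { // signed weight must exceed the proven weight
		return nil, ErrSignedWeightLessThanProvenWeight
	}
	return b.CreateProof() // cached until more signatures are added
}
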
diff --git a/crypto/util.go b/crypto/util.go
index aa8dd3cfc..60bb12aef 100644
--- a/crypto/util.go
+++ b/crypto/util.go
@@ -94,3 +94,10 @@ func HashObj(h Hashable) Digest {
func NewHash() hash.Hash {
return sha512.New512_256()
}
+
+// EncodeAndHash returns both the packed representation of the object and its hash.
+func EncodeAndHash(h Hashable) (Digest, []byte) {
+ hashid, encodedData := h.ToBeHashed()
+ hashrep := append([]byte(hashid), encodedData...)
+ return Hash(hashrep), encodedData
+}
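
EncodeAndHash mirrors HashObj (both hash the domain-separated hashrep) but also hands back the encoded bytes, saving callers a second encoding pass. A runnable sketch; the note type and its "NOTE" hash ID are illustrative inventions, not part of this change:

    package main

    import (
            "fmt"

            "github.com/algorand/go-algorand/crypto"
            "github.com/algorand/go-algorand/protocol"
    )

    // note is a demo Hashable; real types return a registered protocol.HashID.
    type note []byte

    func (n note) ToBeHashed() (protocol.HashID, []byte) {
            return protocol.HashID("NOTE"), n
    }

    func main() {
            msg := note("hello")
            digest, encoded := crypto.EncodeAndHash(msg)
            fmt.Println(digest == crypto.HashObj(msg)) // true: identical digests
            fmt.Printf("%x\n", encoded)                // the raw ToBeHashed payload
    }
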
diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json
index ce40f9ab6..dfeaaa01b 100644
--- a/daemon/algod/api/algod.oas2.json
+++ b/daemon/algod/api/algod.oas2.json
@@ -47,6 +47,36 @@
}
}
},
+ "/ready": {
+ "get": {
+ "tags": [
+ "public",
+ "common"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "scheme": [
+ "http"
+ ],
+ "summary": "Returns OK if healthy and fully caught up.",
+ "operationId": "GetReady",
+ "responses": {
+ "200": {
+ "description": "OK."
+ },
+ "500": {
+ "description": "Internal Error"
+ },
+ "503": {
+ "description": "Node not ready yet"
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ }
+ },
"/metrics": {
"get": {
"tags": [
@@ -1212,7 +1242,8 @@
"nonparticipating"
],
"consumes": [
- "application/x-binary"
+ "application/json",
+ "application/msgpack"
],
"produces": [
"application/json",
@@ -1225,13 +1256,12 @@
"operationId": "SimulateTransaction",
"parameters": [
{
- "description": "The byte encoded transaction to simulate",
- "name": "rawtxn",
+ "description": "The transactions to simulate, along with any other inputs.",
+ "name": "request",
"in": "body",
"required": true,
"schema": {
- "type": "string",
- "format": "binary"
+ "$ref": "#/definitions/SimulateRequest"
}
},
{
@@ -1243,7 +1273,7 @@
"$ref": "#/responses/SimulateResponse"
},
"400": {
- "description": "Bad Request - Malformed Algorand transaction",
+ "description": "Bad Request",
"schema": {
"$ref": "#/definitions/ErrorResponse"
}
@@ -1254,9 +1284,6 @@
"$ref": "#/definitions/ErrorResponse"
}
},
- "404": {
- "description": "Transaction simulator not enabled"
- },
"500": {
"description": "Internal Error",
"schema": {
@@ -1442,8 +1469,7 @@
"description": "Get ledger deltas for a round.",
"tags": [
"public",
- "data",
- "experimental"
+ "nonparticipating"
],
"produces": [
"application/json",
@@ -1507,6 +1533,144 @@
}
}
},
+ "/v2/deltas/{round}/txn/group": {
+ "get": {
+ "description": "Get ledger deltas for transaction groups in a given round.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
+ "produces": [
+ "application/json",
+ "application/msgpack"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Get LedgerStateDelta objects for all transaction groups in a given round",
+ "operationId": "GetTransactionGroupLedgerStateDeltasForRound",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "The round for which the deltas are desired.",
+ "name": "round",
+ "in": "path",
+ "required": true,
+ "minimum": 0
+ },
+ {
+ "$ref": "#/parameters/format"
+ }
+ ],
+ "responses": {
+ "200": {
+ "$ref": "#/responses/TransactionGroupLedgerStateDeltasForRoundResponse"
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Could not find deltas for round",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "408": {
+          "description": "Timed out on request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "501": {
+ "description": "Not Implemented",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ }
+ },
+ "/v2/deltas/txn/group/{id}": {
+ "get": {
+ "description": "Get a ledger delta for a given transaction group.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
+ "produces": [
+ "application/json",
+ "application/msgpack"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Get a LedgerStateDelta object for a given transaction group",
+ "operationId": "GetLedgerStateDeltaForTransactionGroup",
+ "parameters": [
+ {
+ "pattern": "[A-Z0-9]+",
+ "type": "string",
+ "description": "A transaction ID, or transaction group ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "$ref": "#/parameters/format"
+ }
+ ],
+ "responses": {
+ "200": {
+ "$ref": "#/responses/LedgerStateDeltaForTransactionGroupResponse"
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Could not find a delta for transaction ID or group ID",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "408": {
+          "description": "Timed out on request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "501": {
+ "description": "Not Implemented",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ }
+ },
"/v2/stateproofs/{round}": {
"get": {
"tags": [
@@ -1787,7 +1951,7 @@
},
"/v2/applications/{application-id}/box": {
"get": {
- "description": "Given an application ID and box name, it returns the box name and value (each base64 encoded). Box names must be in the goal app call arg encoding form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
+ "description": "Given an application ID and box name, it returns the round, box name, and value (each base64 encoded). Box names must be in the goal app call arg encoding form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
"tags": [
"public",
"nonparticipating"
@@ -2402,6 +2566,89 @@
}
}
}
+ },
+ "/v2/devmode/blocks/offset": {
+ "get": {
+ "description": "Gets the current timestamp offset.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Returns the timestamp offset. Timestamp offsets can only be set in dev mode.",
+ "operationId": "GetBlockTimeStampOffset",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "$ref": "#/responses/GetBlockTimeStampOffsetResponse"
+ },
+ "400": {
+ "description": "TimeStamp offset not set.",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ }
+ },
+ "/v2/devmode/blocks/offset/{offset}": {
+ "post": {
+ "description": "Sets the timestamp offset (seconds) for blocks in dev mode. Providing an offset of 0 will unset this value and try to use the real clock for the timestamp.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ],
+ "schemes": [
+ "http"
+ ],
+ "summary": "Given a timestamp offset in seconds, adds the offset to every subsequent block header's timestamp.",
+ "operationId": "SetBlockTimeStampOffset",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "The timestamp offset for blocks in dev mode.",
+ "name": "offset",
+ "in": "path",
+ "required": true,
+ "minimum": 0
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "type": "object"
+ },
+ "400": {
+ "description": "Cannot set timestamp offset to a negative integer.",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Invalid API Token",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Error",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "default": {
+ "description": "Unknown Error"
+ }
+ }
+ }
}
},
"definitions": {
@@ -2410,6 +2657,25 @@
"type": "object",
"x-algorand-format": "StateDelta"
},
+ "LedgerStateDeltaForTransactionGroup": {
+ "description": "Contains a ledger delta for a single transaction group",
+ "type": "object",
+ "required": [
+ "Delta",
+ "Ids"
+ ],
+ "properties": {
+ "Delta": {
+ "$ref": "#/definitions/LedgerStateDelta"
+ },
+ "Ids": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
"Account": {
"description": "Account information at a given round.\n\nDefinition:\ndata/basics/userBalance.go : AccountData\n",
"type": "object",
@@ -2946,15 +3212,15 @@
"type": "integer"
},
"local-state-schema": {
- "description": "[\\lsch\\] local schema",
+ "description": "\\[lsch\\] local schema",
"$ref": "#/definitions/ApplicationStateSchema"
},
"global-state-schema": {
- "description": "[\\gsch\\] global schema",
+ "description": "\\[gsch\\] global schema",
"$ref": "#/definitions/ApplicationStateSchema"
},
"global-state": {
- "description": "[\\gs\\] global schema",
+ "description": "\\[gs\\] global state",
"$ref": "#/definitions/TealKeyValueStore"
}
}
@@ -3162,14 +3428,66 @@
}
}
},
+ "SimulateRequest": {
+ "description": "Request type for simulation endpoint.",
+ "type": "object",
+ "required": [
+ "txn-groups"
+ ],
+ "properties": {
+ "txn-groups": {
+ "description": "The transaction groups to simulate.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/SimulateRequestTransactionGroup"
+ }
+ },
+ "allow-empty-signatures": {
+ "description": "Allow transactions without signatures to be simulated as if they had correct signatures.",
+ "type": "boolean"
+ },
+ "allow-more-logging": {
+ "description": "Lifts limits on log opcode usage during simulation.",
+ "type": "boolean"
+ },
+ "extra-opcode-budget": {
+ "description": "Applies extra opcode budget during simulation for each transaction group.",
+ "type": "integer"
+ }
+ }
+ },
+ "SimulateRequestTransactionGroup": {
+ "description": "A transaction group to simulate.",
+ "type": "object",
+ "required": [
+ "txns"
+ ],
+ "properties": {
+ "txns": {
+ "description": "An atomic transaction group.",
+ "type": "array",
+ "items": {
+ "description": "SignedTxn object. Must be canonically encoded.",
+ "type": "string",
+ "format": "json",
+ "x-algorand-format": "SignedTransaction"
+ }
+ }
+ }
+ },
"Box": {
"description": "Box name and its content.",
"type": "object",
"required": [
+ "round",
"name",
"value"
],
"properties": {
+ "round": {
+ "description": "The round for which this information is relevant",
+ "type": "integer"
+ },
"name": {
"description": "\\[name\\] box name, base64 encoded",
"type": "string",
@@ -3324,18 +3642,18 @@
"type": "integer"
},
"local-state-delta": {
- "description": "\\[ld\\] Local state key/value changes for the application being executed by this transaction.",
+ "description": "Local state key/value changes for the application being executed by this transaction.",
"type": "array",
"items": {
"$ref": "#/definitions/AccountStateDelta"
}
},
"global-state-delta": {
- "description": "\\[gd\\] Global state key/value changes for the application being executed by this transaction.",
+ "description": "Global state key/value changes for the application being executed by this transaction.",
"$ref": "#/definitions/StateDelta"
},
"logs": {
- "description": "\\[lg\\] Logs for the application being executed by this transaction.",
+ "description": "Logs for the application being executed by this transaction.",
"type": "array",
"items": {
"type": "string",
@@ -3380,6 +3698,14 @@
"items": {
"type": "integer"
}
+ },
+ "app-budget-added": {
+ "description": "Total budget added during execution of app calls in the transaction group.",
+ "type": "integer"
+ },
+ "app-budget-consumed": {
+ "description": "Total budget consumed during execution of app calls in the transaction group.",
+ "type": "integer"
}
}
},
@@ -3393,9 +3719,13 @@
"txn-result": {
"$ref": "#/definitions/PendingTransactionResponse"
},
- "missing-signature": {
- "description": "A boolean indicating whether this transaction is missing signatures",
- "type": "boolean"
+ "app-budget-consumed": {
+            "description": "Budget used during execution of an app call transaction. This value includes budget used by inner app calls spawned by this transaction.",
+ "type": "integer"
+ },
+ "logic-sig-budget-consumed": {
+ "description": "Budget used during execution of a logic sig transaction.",
+ "type": "integer"
}
}
},
@@ -3478,6 +3808,28 @@
"x-algorand-format": "uint64"
}
}
+ },
+ "SimulationEvalOverrides": {
+      "description": "The set of parameters and limits overridden during simulation. If this set of parameters is present, then evaluation parameters may differ from standard evaluation in certain ways.",
+ "type": "object",
+ "properties": {
+ "allow-empty-signatures": {
+ "description": "If true, transactions without signatures are allowed and simulated as if they were properly signed.",
+ "type": "boolean"
+ },
+ "max-log-calls": {
+          "description": "The maximum number of log calls one can make during simulation",
+ "type": "integer"
+ },
+ "max-log-size": {
+          "description": "The maximum number of bytes that can be logged during simulation",
+ "type": "integer"
+ },
+ "extra-opcode-budget": {
+ "description": "The extra opcode budget added to each transaction group during simulation",
+ "type": "integer"
+ }
+ }
}
},
"parameters": {
@@ -3652,6 +4004,21 @@
}
},
"responses": {
+ "GetBlockTimeStampOffsetResponse": {
+ "description": "Response containing the timestamp offset in seconds",
+ "schema": {
+ "type": "object",
+ "required": [
+ "offset"
+ ],
+ "properties": {
+ "offset": {
+ "description": "Timestamp offset in seconds.",
+ "type": "integer"
+ }
+ }
+ }
+ },
"GetSyncRoundResponse": {
"description": "Response containing the ledger's minimum sync round",
"schema": {
@@ -3667,6 +4034,29 @@
}
}
},
+ "LedgerStateDeltaForTransactionGroupResponse": {
+ "description": "Response containing a ledger state delta for a single transaction group.",
+ "schema": {
+ "$ref": "#/definitions/LedgerStateDelta"
+ }
+ },
+ "TransactionGroupLedgerStateDeltasForRoundResponse": {
+ "description": "Response containing all ledger state deltas for transaction groups, with their associated Ids, in a single round.",
+ "schema": {
+ "type": "object",
+ "required": [
+ "Deltas"
+ ],
+ "properties": {
+ "Deltas": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/LedgerStateDeltaForTransactionGroup"
+ }
+ }
+ }
+ }
+ },
"LedgerStateDeltaResponse": {
"description": "Contains ledger deltas",
"schema": {
@@ -3966,7 +4356,7 @@
"type": "integer"
},
"upgrade-vote-rounds": {
- "description": "Total voting ounds for current upgrade",
+ "description": "Total voting rounds for current upgrade",
"type": "integer"
}
}
@@ -4049,8 +4439,7 @@
"required": [
"version",
"last-round",
- "txn-groups",
- "would-succeed"
+ "txn-groups"
],
"properties": {
"version": {
@@ -4068,9 +4457,8 @@
"$ref": "#/definitions/SimulateTransactionGroupResult"
}
},
- "would-succeed": {
- "description": "Indicates whether the simulated transactions would have succeeded during an actual submission. If any transaction fails or is missing a signature, this will be false.",
- "type": "boolean"
+ "eval-overrides": {
+ "$ref": "#/definitions/SimulationEvalOverrides"
}
}
}
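
Taken together, the simulate changes replace the raw transaction body with a structured SimulateRequest and drop the 404 for a disabled simulator. A hedged sketch of posting a JSON request with the new optional flags; the host, token, and empty transaction list are placeholders:

    package main

    import (
            "bytes"
            "log"
            "net/http"
    )

    func main() {
            // Field names follow the SimulateRequest schema above.
            body := []byte(`{"txn-groups":[{"txns":[]}],` +
                    `"allow-empty-signatures":true,"extra-opcode-budget":20000}`)
            req, err := http.NewRequest("POST",
                    "http://localhost:8080/v2/transactions/simulate", bytes.NewReader(body))
            if err != nil {
                    log.Fatal(err)
            }
            req.Header.Set("Content-Type", "application/json")
            req.Header.Set("X-Algo-API-Token", "your-api-token") // placeholder
            resp, err := http.DefaultClient.Do(req)
            if err != nil {
                    log.Fatal(err)
            }
            defer resp.Body.Close()
            log.Println("simulate:", resp.Status) // 400 now covers any bad request
    }
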
diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml
index 11435901d..2c65bdde1 100644
--- a/daemon/algod/api/algod.oas3.yml
+++ b/daemon/algod/api/algod.oas3.yml
@@ -497,6 +497,25 @@
},
"description": "DryrunResponse contains per-txn debug information from a dryrun."
},
+ "GetBlockTimeStampOffsetResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "offset": {
+ "description": "Timestamp offset in seconds.",
+ "type": "integer"
+ }
+ },
+ "required": [
+ "offset"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Response containing the timestamp offset in seconds"
+ },
"GetSyncRoundResponse": {
"content": {
"application/json": {
@@ -516,6 +535,16 @@
},
"description": "Response containing the ledger's minimum sync round"
},
+ "LedgerStateDeltaForTransactionGroupResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/LedgerStateDelta"
+ }
+ }
+ },
+ "description": "Response containing a ledger state delta for a single transaction group."
+ },
"LedgerStateDeltaResponse": {
"content": {
"application/json": {
@@ -631,7 +660,7 @@
"type": "boolean"
},
"upgrade-vote-rounds": {
- "description": "Total voting ounds for current upgrade",
+ "description": "Total voting rounds for current upgrade",
"type": "integer"
},
"upgrade-votes": {
@@ -758,6 +787,9 @@
"application/json": {
"schema": {
"properties": {
+ "eval-overrides": {
+ "$ref": "#/components/schemas/SimulationEvalOverrides"
+ },
"last-round": {
"description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.",
"type": "integer"
@@ -772,17 +804,12 @@
"version": {
"description": "The version of this response object.",
"type": "integer"
- },
- "would-succeed": {
- "description": "Indicates whether the simulated transactions would have succeeded during an actual submission. If any transaction fails or is missing a signature, this will be false.",
- "type": "boolean"
}
},
"required": [
"last-round",
"txn-groups",
- "version",
- "would-succeed"
+ "version"
],
"type": "object"
}
@@ -830,6 +857,27 @@
},
"description": "Supply represents the current supply of MicroAlgos in the system."
},
+ "TransactionGroupLedgerStateDeltasForRoundResponse": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "Deltas": {
+ "items": {
+ "$ref": "#/components/schemas/LedgerStateDeltaForTransactionGroup"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "Deltas"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Response containing all ledger state deltas for transaction groups, with their associated Ids, in a single round."
+ },
"TransactionParametersResponse": {
"content": {
"application/json": {
@@ -1358,6 +1406,10 @@
"pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
"type": "string"
},
+ "round": {
+ "description": "The round for which this information is relevant",
+ "type": "integer"
+ },
"value": {
"description": "\\[value\\] box value, base64 encoded.",
"format": "byte",
@@ -1367,6 +1419,7 @@
},
"required": [
"name",
+ "round",
"value"
],
"type": "object"
@@ -1687,6 +1740,25 @@
"type": "object",
"x-algorand-format": "StateDelta"
},
+ "LedgerStateDeltaForTransactionGroup": {
+ "description": "Contains a ledger delta for a single transaction group",
+ "properties": {
+ "Delta": {
+ "$ref": "#/components/schemas/LedgerStateDelta"
+ },
+ "Ids": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "Delta",
+ "Ids"
+ ],
+ "type": "object"
+ },
"LightBlockHeaderProof": {
"description": "Proof of membership and position of a light block header.",
"properties": {
@@ -1795,14 +1867,14 @@
"type": "array"
},
"local-state-delta": {
- "description": "\\[ld\\] Local state key/value changes for the application being executed by this transaction.",
+ "description": "Local state key/value changes for the application being executed by this transaction.",
"items": {
"$ref": "#/components/schemas/AccountStateDelta"
},
"type": "array"
},
"logs": {
- "description": "\\[lg\\] Logs for the application being executed by this transaction.",
+ "description": "Logs for the application being executed by this transaction.",
"items": {
"format": "byte",
"pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
@@ -1835,9 +1907,64 @@
],
"type": "object"
},
+ "SimulateRequest": {
+ "description": "Request type for simulation endpoint.",
+ "properties": {
+ "allow-empty-signatures": {
+ "description": "Allow transactions without signatures to be simulated as if they had correct signatures.",
+ "type": "boolean"
+ },
+ "allow-more-logging": {
+ "description": "Lifts limits on log opcode usage during simulation.",
+ "type": "boolean"
+ },
+ "extra-opcode-budget": {
+ "description": "Applies extra opcode budget during simulation for each transaction group.",
+ "type": "integer"
+ },
+ "txn-groups": {
+ "description": "The transaction groups to simulate.",
+ "items": {
+ "$ref": "#/components/schemas/SimulateRequestTransactionGroup"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "txn-groups"
+ ],
+ "type": "object"
+ },
+ "SimulateRequestTransactionGroup": {
+ "description": "A transaction group to simulate.",
+ "properties": {
+ "txns": {
+ "description": "An atomic transaction group.",
+ "items": {
+ "description": "SignedTxn object. Must be canonically encoded.",
+ "format": "json",
+ "type": "string",
+ "x-algorand-format": "SignedTransaction"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "txns"
+ ],
+ "type": "object"
+ },
"SimulateTransactionGroupResult": {
"description": "Simulation result for an atomic transaction group",
"properties": {
+ "app-budget-added": {
+ "description": "Total budget added during execution of app calls in the transaction group.",
+ "type": "integer"
+ },
+ "app-budget-consumed": {
+ "description": "Total budget consumed during execution of app calls in the transaction group.",
+ "type": "integer"
+ },
"failed-at": {
"description": "If present, indicates which transaction in this group caused the failure. This array represents the path to the failing transaction. Indexes are zero based, the first element indicates the top-level transaction, and successive elements indicate deeper inner transactions.",
"items": {
@@ -1865,9 +1992,13 @@
"SimulateTransactionResult": {
"description": "Simulation result for an individual transaction",
"properties": {
- "missing-signature": {
- "description": "A boolean indicating whether this transaction is missing signatures",
- "type": "boolean"
+ "app-budget-consumed": {
+        "description": "Budget used during execution of an app call transaction. This value includes budget used by inner app calls spawned by this transaction.",
+ "type": "integer"
+ },
+ "logic-sig-budget-consumed": {
+ "description": "Budget used during execution of a logic sig transaction.",
+ "type": "integer"
},
"txn-result": {
"$ref": "#/components/schemas/PendingTransactionResponse"
@@ -1878,6 +2009,28 @@
],
"type": "object"
},
+ "SimulationEvalOverrides": {
+        "description": "The set of parameters and limits overridden during simulation. If this set of parameters is present, then evaluation parameters may differ from standard evaluation in certain ways.",
+ "properties": {
+ "allow-empty-signatures": {
+ "description": "If true, transactions without signatures are allowed and simulated as if they were properly signed.",
+ "type": "boolean"
+ },
+ "extra-opcode-budget": {
+ "description": "The extra opcode budget added to each transaction group during simulation",
+ "type": "integer"
+ },
+ "max-log-calls": {
+          "description": "The maximum number of log calls one can make during simulation",
+ "type": "integer"
+ },
+ "max-log-size": {
+          "description": "The maximum number of bytes that can be logged during simulation",
+ "type": "integer"
+ }
+ },
+ "type": "object"
+ },
"StateDelta": {
"description": "Application state delta.",
"items": {
@@ -2110,6 +2263,34 @@
]
}
},
+ "/ready": {
+ "get": {
+ "operationId": "GetReady",
+ "responses": {
+ "200": {
+ "content": {},
+ "description": "OK."
+ },
+ "500": {
+ "content": {},
+ "description": "Internal Error"
+ },
+ "503": {
+ "content": {},
+ "description": "Node not ready yet"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Returns OK if healthy and fully caught up.",
+ "tags": [
+ "public",
+ "common"
+ ]
+ }
+ },
"/swagger.json": {
"get": {
"description": "Returns the entire swagger spec in json.",
@@ -2774,7 +2955,7 @@
},
"/v2/applications/{application-id}/box": {
"get": {
- "description": "Given an application ID and box name, it returns the box name and value (each base64 encoded). Box names must be in the goal app call arg encoding form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
+ "description": "Given an application ID and box name, it returns the round, box name, and value (each base64 encoded). Box names must be in the goal app call arg encoding form 'encoding:value'. For ints, use the form 'int:1234'. For raw bytes, use the form 'b64:A=='. For printable strings, use the form 'str:hello'. For addresses, use the form 'addr:XYZ...'.",
"operationId": "GetApplicationBoxByName",
"parameters": [
{
@@ -3675,6 +3856,137 @@
]
}
},
+ "/v2/deltas/txn/group/{id}": {
+ "get": {
+ "description": "Get a ledger delta for a given transaction group.",
+ "operationId": "GetLedgerStateDeltaForTransactionGroup",
+ "parameters": [
+ {
+ "description": "A transaction ID, or transaction group ID",
+ "in": "path",
+ "name": "id",
+ "required": true,
+ "schema": {
+ "pattern": "[A-Z0-9]+",
+ "type": "string"
+ }
+ },
+ {
+ "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.",
+ "in": "query",
+ "name": "format",
+ "schema": {
+ "enum": [
+ "json",
+ "msgpack"
+ ],
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/LedgerStateDelta"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/LedgerStateDelta"
+ }
+ }
+ },
+ "description": "Response containing a ledger state delta for a single transaction group."
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Could not find a delta for transaction ID or group ID"
+ },
+ "408": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+          "description": "Timed out on request"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "501": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Not Implemented"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Get a LedgerStateDelta object for a given transaction group",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
+ }
+ },
"/v2/deltas/{round}": {
"get": {
"description": "Get ledger deltas for a round.",
@@ -3802,8 +4114,269 @@
"summary": "Get a LedgerStateDelta object for a given round",
"tags": [
"public",
- "data",
- "experimental"
+ "nonparticipating"
+ ]
+ }
+ },
+ "/v2/deltas/{round}/txn/group": {
+ "get": {
+ "description": "Get ledger deltas for transaction groups in a given round.",
+ "operationId": "GetTransactionGroupLedgerStateDeltasForRound",
+ "parameters": [
+ {
+ "description": "The round for which the deltas are desired.",
+ "in": "path",
+ "name": "round",
+ "required": true,
+ "schema": {
+ "minimum": 0,
+ "type": "integer"
+ }
+ },
+ {
+ "description": "Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.",
+ "in": "query",
+ "name": "format",
+ "schema": {
+ "enum": [
+ "json",
+ "msgpack"
+ ],
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "Deltas": {
+ "items": {
+ "$ref": "#/components/schemas/LedgerStateDeltaForTransactionGroup"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "Deltas"
+ ],
+ "type": "object"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "properties": {
+ "Deltas": {
+ "items": {
+ "$ref": "#/components/schemas/LedgerStateDeltaForTransactionGroup"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "Deltas"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Response containing all ledger state deltas for transaction groups, with their associated Ids, in a single round."
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "404": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Could not find deltas for round"
+ },
+ "408": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+          "description": "Timed out on request"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "501": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Not Implemented"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Get LedgerStateDelta objects for all transaction groups in a given round",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
+ }
+ },
+ "/v2/devmode/blocks/offset": {
+ "get": {
+ "description": "Gets the current timestamp offset.",
+ "operationId": "GetBlockTimeStampOffset",
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "offset": {
+ "description": "Timestamp offset in seconds.",
+ "type": "integer"
+ }
+ },
+ "required": [
+ "offset"
+ ],
+ "type": "object"
+ }
+ }
+ },
+ "description": "Response containing the timestamp offset in seconds"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "TimeStamp offset not set."
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Returns the timestamp offset. Timestamp offsets can only be set in dev mode.",
+ "tags": [
+ "public",
+ "nonparticipating"
+ ]
+ }
+ },
+ "/v2/devmode/blocks/offset/{offset}": {
+ "post": {
+ "description": "Sets the timestamp offset (seconds) for blocks in dev mode. Providing an offset of 0 will unset this value and try to use the real clock for the timestamp.",
+ "operationId": "SetBlockTimeStampOffset",
+ "parameters": [
+ {
+ "description": "The timestamp offset for blocks in dev mode.",
+ "in": "path",
+ "name": "offset",
+ "required": true,
+ "schema": {
+ "minimum": 0,
+ "type": "integer"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {},
+ "description": "OK"
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Cannot set timestamp offset to a negative integer."
+ },
+ "401": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Invalid API Token"
+ },
+ "500": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ErrorResponse"
+ }
+ }
+ },
+ "description": "Internal Error"
+ },
+ "default": {
+ "content": {},
+ "description": "Unknown Error"
+ }
+ },
+ "summary": "Given a timestamp offset in seconds, adds the offset to every subsequent block header's timestamp.",
+ "tags": [
+ "public",
+ "nonparticipating"
]
}
},
@@ -4711,7 +5284,7 @@
"type": "boolean"
},
"upgrade-vote-rounds": {
- "description": "Total voting ounds for current upgrade",
+ "description": "Total voting rounds for current upgrade",
"type": "integer"
},
"upgrade-votes": {
@@ -4886,7 +5459,7 @@
"type": "boolean"
},
"upgrade-vote-rounds": {
- "description": "Total voting ounds for current upgrade",
+ "description": "Total voting rounds for current upgrade",
"type": "integer"
},
"upgrade-votes": {
@@ -5693,14 +6266,18 @@
],
"requestBody": {
"content": {
- "application/x-binary": {
+ "application/json": {
"schema": {
- "format": "binary",
- "type": "string"
+ "$ref": "#/components/schemas/SimulateRequest"
+ }
+ },
+ "application/msgpack": {
+ "schema": {
+ "$ref": "#/components/schemas/SimulateRequest"
}
}
},
- "description": "The byte encoded transaction to simulate",
+ "description": "The transactions to simulate, along with any other inputs.",
"required": true
},
"responses": {
@@ -5709,6 +6286,9 @@
"application/json": {
"schema": {
"properties": {
+ "eval-overrides": {
+ "$ref": "#/components/schemas/SimulationEvalOverrides"
+ },
"last-round": {
"description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.",
"type": "integer"
@@ -5723,17 +6303,12 @@
"version": {
"description": "The version of this response object.",
"type": "integer"
- },
- "would-succeed": {
- "description": "Indicates whether the simulated transactions would have succeeded during an actual submission. If any transaction fails or is missing a signature, this will be false.",
- "type": "boolean"
}
},
"required": [
"last-round",
"txn-groups",
- "version",
- "would-succeed"
+ "version"
],
"type": "object"
}
@@ -5741,6 +6316,9 @@
"application/msgpack": {
"schema": {
"properties": {
+ "eval-overrides": {
+ "$ref": "#/components/schemas/SimulationEvalOverrides"
+ },
"last-round": {
"description": "The round immediately preceding this simulation. State changes through this round were used to run this simulation.",
"type": "integer"
@@ -5755,17 +6333,12 @@
"version": {
"description": "The version of this response object.",
"type": "integer"
- },
- "would-succeed": {
- "description": "Indicates whether the simulated transactions would have succeeded during an actual submission. If any transaction fails or is missing a signature, this will be false.",
- "type": "boolean"
}
},
"required": [
"last-round",
"txn-groups",
- "version",
- "would-succeed"
+ "version"
],
"type": "object"
}
@@ -5786,7 +6359,7 @@
}
}
},
- "description": "Bad Request - Malformed Algorand transaction"
+ "description": "Bad Request"
},
"401": {
"content": {
@@ -5803,10 +6376,6 @@
},
"description": "Invalid API Token"
},
- "404": {
- "content": {},
- "description": "Transaction simulator not enabled"
- },
"500": {
"content": {
"application/json": {
@@ -5847,7 +6416,7 @@
"public",
"nonparticipating"
],
- "x-codegen-request-body-name": "rawtxn"
+ "x-codegen-request-body-name": "request"
}
},
"/versions": {
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index f3f541573..829e7c68e 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -34,6 +34,7 @@ import (
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/protocol"
)
@@ -45,10 +46,11 @@ const (
// rawRequestPaths is a set of paths where the body should not be urlencoded
var rawRequestPaths = map[string]bool{
- "/v2/transactions": true,
- "/v2/teal/dryrun": true,
- "/v2/teal/compile": true,
- "/v2/participation": true,
+ "/v2/transactions": true,
+ "/v2/teal/dryrun": true,
+ "/v2/teal/compile": true,
+ "/v2/participation": true,
+ "/v2/transactions/simulate": true,
}
// unauthorizedRequestError is generated when we receive 401 error from the server. This error includes the inner error
@@ -162,38 +164,49 @@ type RawResponse interface {
SetBytes([]byte)
}
+// mergeRawQueries merges two raw queries, appending an "&" if both are non-empty
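+// For example (illustrative only): mergeRawQueries("format=msgpack", "") yields
+// "format=msgpack", while mergeRawQueries("a=1", "b=2") yields "a=1&b=2".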
+func mergeRawQueries(q1, q2 string) string {
+ if q1 == "" || q2 == "" {
+ return q1 + q2
+ }
+ return q1 + "&" + q2
+}
+
// submitForm is a helper used for submitting (ex.) GETs and POSTs to the server
// if expectNoContent is true, then it is expected that the response received will have a content length of zero
-func (client RestClient) submitForm(response interface{}, path string, request interface{}, requestMethod string, encodeJSON bool, decodeJSON bool, expectNoContent bool) error {
+func (client RestClient) submitForm(
+ response interface{}, path string, params interface{}, body interface{},
+ requestMethod string, encodeJSON bool, decodeJSON bool, expectNoContent bool) error {
+
var err error
queryURL := client.serverURL
queryURL.Path = path
var req *http.Request
- var body io.Reader
+ var bodyReader io.Reader
+ var v url.Values
- if request != nil {
- if rawRequestPaths[path] {
- reqBytes, ok := request.([]byte)
- if !ok {
- return fmt.Errorf("couldn't decode raw request as bytes")
- }
- body = bytes.NewBuffer(reqBytes)
- } else {
- v, err := query.Values(request)
- if err != nil {
- return err
- }
+ if params != nil {
+ v, err = query.Values(params)
+ if err != nil {
+ return err
+ }
+ }
- queryURL.RawQuery = v.Encode()
- if encodeJSON {
- jsonValue, _ := json.Marshal(request)
- body = bytes.NewBuffer(jsonValue)
- }
+ if requestMethod == "POST" && rawRequestPaths[path] {
+ reqBytes, ok := body.([]byte)
+ if !ok {
+ return fmt.Errorf("couldn't decode raw request as bytes")
}
+ bodyReader = bytes.NewBuffer(reqBytes)
+ } else if encodeJSON {
+ jsonValue, _ := json.Marshal(params)
+ bodyReader = bytes.NewBuffer(jsonValue)
}
- req, err = http.NewRequest(requestMethod, queryURL.String(), body)
+ queryURL.RawQuery = mergeRawQueries(queryURL.RawQuery, v.Encode())
+
+ req, err = http.NewRequest(requestMethod, queryURL.String(), bodyReader)
if err != nil {
return err
}
@@ -248,26 +261,26 @@ func (client RestClient) submitForm(response interface{}, path string, request i
// get performs a GET request to the specific path against the server
func (client RestClient) get(response interface{}, path string, request interface{}) error {
- return client.submitForm(response, path, request, "GET", false /* encodeJSON */, true /* decodeJSON */, false)
+ return client.submitForm(response, path, request, nil, "GET", false /* encodeJSON */, true /* decodeJSON */, false)
}
// delete performs a DELETE request to the specific path against the server
// when expectNoContent is true, then no content is expected to be returned from the endpoint
func (client RestClient) delete(response interface{}, path string, request interface{}, expectNoContent bool) error {
- return client.submitForm(response, path, request, "DELETE", false /* encodeJSON */, true /* decodeJSON */, expectNoContent)
+ return client.submitForm(response, path, request, nil, "DELETE", false /* encodeJSON */, true /* decodeJSON */, expectNoContent)
}
// getRaw behaves identically to get but doesn't json decode the response, and
// the response must implement the RawResponse interface
func (client RestClient) getRaw(response RawResponse, path string, request interface{}) error {
- return client.submitForm(response, path, request, "GET", false /* encodeJSON */, false /* decodeJSON */, false)
+ return client.submitForm(response, path, request, nil, "GET", false /* encodeJSON */, false /* decodeJSON */, false)
}
// post sends a POST request to the given path with the given request object.
// No query parameters will be sent if request is nil.
// response must be a pointer to an object as post writes the response there.
-func (client RestClient) post(response interface{}, path string, request interface{}, expectNoContent bool) error {
- return client.submitForm(response, path, request, "POST", true /* encodeJSON */, true /* decodeJSON */, expectNoContent)
+func (client RestClient) post(response interface{}, path string, params interface{}, body interface{}, expectNoContent bool) error {
+ return client.submitForm(response, path, params, body, "POST", true /* encodeJSON */, true /* decodeJSON */, expectNoContent)
}
// Status retrieves the StatusResponse from the running node
@@ -284,12 +297,18 @@ func (client RestClient) WaitForBlock(round basics.Round) (response model.NodeSt
return
}
-// HealthCheck does a health check on the the potentially running node,
+// HealthCheck does a health check on the potentially running node,
// returning an error if the API is down
func (client RestClient) HealthCheck() error {
return client.get(nil, "/health", nil)
}
+// ReadyCheck does a readiness check on the potentially running node,
+// returning an error if the node is not ready (caught up and healthy)
+func (client RestClient) ReadyCheck() error {
+ return client.get(nil, "/ready", nil)
+}
+
// StatusAfterBlock waits for a block to occur then returns the StatusResponse after that block
// blocks on the node end
// Not supported
@@ -504,7 +523,7 @@ func (client RestClient) SuggestedParams() (response model.TransactionParameters
// SendRawTransaction gets a SignedTxn and broadcasts it to the network
func (client RestClient) SendRawTransaction(txn transactions.SignedTxn) (response model.PostTransactionsResponse, err error) {
- err = client.post(&response, "/v2/transactions", protocol.Encode(&txn), false)
+ err = client.post(&response, "/v2/transactions", nil, protocol.Encode(&txn), false)
return
}
@@ -518,7 +537,7 @@ func (client RestClient) SendRawTransactionGroup(txgroup []transactions.SignedTx
}
var response model.PostTransactionsResponse
- return client.post(&response, "/v2/transactions", enc, false)
+ return client.post(&response, "/v2/transactions", nil, enc, false)
}
// Block gets the block info for the given round
@@ -538,19 +557,19 @@ func (client RestClient) RawBlock(round uint64) (response []byte, err error) {
// Shutdown requests the node to shut itself down
func (client RestClient) Shutdown() (err error) {
response := 1
- err = client.post(&response, "/v2/shutdown", nil, false)
+ err = client.post(&response, "/v2/shutdown", nil, nil, false)
return
}
// AbortCatchup aborts the currently running catchup
func (client RestClient) AbortCatchup(catchpointLabel string) (response model.CatchpointAbortResponse, err error) {
- err = client.submitForm(&response, fmt.Sprintf("/v2/catchup/%s", catchpointLabel), nil, "DELETE", false, true, false)
+ err = client.submitForm(&response, fmt.Sprintf("/v2/catchup/%s", catchpointLabel), nil, nil, "DELETE", false, true, false)
return
}
// Catchup start catching up to the give catchpoint label
func (client RestClient) Catchup(catchpointLabel string) (response model.CatchpointStartResponse, err error) {
- err = client.submitForm(&response, fmt.Sprintf("/v2/catchup/%s", catchpointLabel), nil, "POST", false, true, false)
+ err = client.submitForm(&response, fmt.Sprintf("/v2/catchup/%s", catchpointLabel), nil, nil, "POST", false, true, false)
return
}
@@ -565,23 +584,52 @@ func (client RestClient) GetGoRoutines(ctx context.Context) (goRoutines string,
return
}
+type compileParams struct {
+ SourceMap bool `url:"sourcemap,omitempty"`
+}
+
// Compile compiles the given program and returned the compiled program
-func (client RestClient) Compile(program []byte) (compiledProgram []byte, programHash crypto.Digest, err error) {
+func (client RestClient) Compile(program []byte, useSourceMap bool) (compiledProgram []byte, programHash crypto.Digest, sourceMap *logic.SourceMap, err error) {
var compileResponse model.CompileResponse
- err = client.submitForm(&compileResponse, "/v2/teal/compile", program, "POST", false, true, false)
+
+ compileRequest := compileParams{SourceMap: useSourceMap}
+
+ err = client.submitForm(&compileResponse, "/v2/teal/compile", compileRequest, program, "POST", false, true, false)
if err != nil {
- return nil, crypto.Digest{}, err
+ return nil, crypto.Digest{}, nil, err
}
compiledProgram, err = base64.StdEncoding.DecodeString(compileResponse.Result)
if err != nil {
- return nil, crypto.Digest{}, err
+ return nil, crypto.Digest{}, nil, err
}
var progAddr basics.Address
progAddr, err = basics.UnmarshalChecksumAddress(compileResponse.Hash)
if err != nil {
- return nil, crypto.Digest{}, err
+ return nil, crypto.Digest{}, nil, err
}
programHash = crypto.Digest(progAddr)
+
+	// if we don't need the sourcemap, return what we have so far
+ if !useSourceMap {
+ return
+ }
+
+	// otherwise, convert the response's *map[string]interface{} into *logic.SourceMap
+ if compileResponse.Sourcemap == nil {
+		return nil, crypto.Digest{}, nil, fmt.Errorf("requested sourcemap but received none")
+ }
+
+ var srcMapInstance logic.SourceMap
+ var jsonBytes []byte
+
+ if jsonBytes, err = json.Marshal(*compileResponse.Sourcemap); err != nil {
+ return nil, crypto.Digest{}, nil, err
+ }
+ if err = json.Unmarshal(jsonBytes, &srcMapInstance); err != nil {
+ return nil, crypto.Digest{}, nil, err
+ }
+ sourceMap = &srcMapInstance
+
return
}
@@ -624,7 +672,15 @@ func (client RestClient) doGetWithQuery(ctx context.Context, path string, queryA
// RawDryrun gets the raw DryrunResponse associated with the passed address
func (client RestClient) RawDryrun(data []byte) (response []byte, err error) {
var blob Blob
- err = client.submitForm(&blob, "/v2/teal/dryrun", data, "POST", false /* encodeJSON */, false /* decodeJSON */, false)
+ err = client.submitForm(&blob, "/v2/teal/dryrun", nil, data, "POST", false /* encodeJSON */, false /* decodeJSON */, false)
+ response = blob
+ return
+}
+
+// RawSimulateRawTransaction simulates transactions by taking raw request bytes and returns relevant simulation results as raw bytes.
+func (client RestClient) RawSimulateRawTransaction(data []byte) (response []byte, err error) {
+ var blob Blob
+ err = client.submitForm(&blob, "/v2/transactions/simulate", rawFormat{Format: "msgpack"}, data, "POST", false /* encodeJSON */, false /* decodeJSON */, false)
response = blob
return
}
@@ -650,7 +706,7 @@ func (client RestClient) TransactionProof(txid string, round uint64, hashType cr
// PostParticipationKey sends a key file to the node.
func (client RestClient) PostParticipationKey(file []byte) (response model.PostParticipationResponse, err error) {
- err = client.post(&response, "/v2/participation", file, false)
+ err = client.post(&response, "/v2/participation", nil, file, false)
return
}
@@ -670,14 +726,13 @@ func (client RestClient) GetParticipationKeyByID(participationID string) (respon
func (client RestClient) RemoveParticipationKeyByID(participationID string) (err error) {
err = client.delete(nil, fmt.Sprintf("/v2/participation/%s", participationID), nil, true)
return
-
}
/* Endpoint registered for follower nodes */
// SetSyncRound sets the sync round for the catchup service
func (client RestClient) SetSyncRound(round uint64) (err error) {
- err = client.post(nil, fmt.Sprintf("/v2/ledger/sync/%d", round), nil, true)
+ err = client.post(nil, fmt.Sprintf("/v2/ledger/sync/%d", round), nil, nil, true)
return
}
@@ -698,3 +753,15 @@ func (client RestClient) GetLedgerStateDelta(round uint64) (response model.Ledge
err = client.get(&response, fmt.Sprintf("/v2/deltas/%d", round), nil)
return
}
+
+// SetBlockTimestampOffset sets the offset in seconds to add to the block timestamp when in devmode
+func (client RestClient) SetBlockTimestampOffset(offset uint64) (err error) {
+ err = client.post(nil, fmt.Sprintf("/v2/devmode/blocks/offset/%d", offset), nil, nil, true)
+ return
+}
+
+// GetBlockTimestampOffset gets the offset in seconds which is being added to devmode blocks
+func (client RestClient) GetBlockTimestampOffset() (response model.GetBlockTimeStampOffsetResponse, err error) {
+ err = client.get(&response, "/v2/devmode/blocks/offset", nil)
+ return
+}
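
A hedged sketch exercising the new client helpers end to end. MakeRestClient is assumed from this package and the Offset field name is assumed from the generated model; the devmode offset calls only succeed against a devmode node:

    package main

    import (
            "log"
            "net/url"

            "github.com/algorand/go-algorand/daemon/algod/api/client"
    )

    func main() {
            u, err := url.Parse("http://localhost:8080") // placeholder host
            if err != nil {
                    log.Fatal(err)
            }
            c := client.MakeRestClient(*u, "your-api-token") // placeholder token
            if err := c.ReadyCheck(); err != nil {
                    log.Printf("node not ready: %v", err)
            }
            if err := c.SetBlockTimestampOffset(25); err != nil { // devmode only
                    log.Fatal(err)
            }
            resp, err := c.GetBlockTimestampOffset()
            if err != nil {
                    log.Fatal(err)
            }
            log.Printf("devmode timestamp offset: %d seconds", resp.Offset)
    }
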
diff --git a/daemon/algod/api/server/common/handlers.go b/daemon/algod/api/server/common/handlers.go
index 0aa107f42..95a1dbcce 100644
--- a/daemon/algod/api/server/common/handlers.go
+++ b/daemon/algod/api/server/common/handlers.go
@@ -18,14 +18,17 @@ package common
import (
"encoding/json"
+ "fmt"
"net/http"
"github.com/labstack/echo/v4"
+ "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/daemon/algod/api"
"github.com/algorand/go-algorand/daemon/algod/api/server/lib"
"github.com/algorand/go-algorand/daemon/algod/api/spec/common"
+ "github.com/algorand/go-algorand/node"
)
// GenesisJSON is an httpHandler for route GET /genesis
@@ -89,6 +92,63 @@ func HealthCheck(ctx lib.ReqContext, context echo.Context) {
json.NewEncoder(w).Encode(nil)
}
+// Ready is an httpHandler for route GET /ready
+// It serves a "readiness" probe indicating whether the node is healthy and fully caught up.
+func Ready(ctx lib.ReqContext, context echo.Context) {
+ // swagger:operation GET /ready Ready
+ //---
+ // Summary: Returns OK if healthy and fully caught up.
+ // Produces:
+ // - application/json
+ // Schemes:
+ // - http
+ // Responses:
+ // 200:
+ // description: OK.
+ // 500:
+ // description: Internal Error.
+ // 503:
+ // description: Node not ready yet.
+ // default: { description: Unknown Error }
+ w := context.Response().Writer
+ w.Header().Set("Content-Type", "application/json")
+
+ stat, err := ctx.Node.Status()
+ code := http.StatusOK
+
+ // isReadyFromStat checks the `Node.Status()` result
+	// and decides whether the node is at the latest round
+	// It must satisfy all of the following conditions:
+	// 1. the node is not in a fast-catchup stage
+	// 2. the node's time since last round is within [0, deadline),
+	//    where deadline = bigLambda + smallLambda = 17s
+ // 3. the node's catchup time is 0
+ isReadyFromStat := func(status node.StatusReport) bool {
+ timeSinceLastRound := status.TimeSinceLastRound().Milliseconds()
+
+ return len(status.Catchpoint) == 0 &&
+ timeSinceLastRound >= 0 &&
+ timeSinceLastRound < agreement.DeadlineTimeout().Milliseconds() &&
+ status.CatchupTime.Milliseconds() == 0
+ }
+
+ if err != nil {
+ code = http.StatusInternalServerError
+ ctx.Log.Error(err)
+ } else if stat.StoppedAtUnsupportedRound {
+ code = http.StatusInternalServerError
+ err = fmt.Errorf("stopped at an unsupported round")
+ ctx.Log.Error(err)
+ } else if !isReadyFromStat(stat) {
+ code = http.StatusServiceUnavailable
+ err = fmt.Errorf("ready failed as the node is catching up")
+ ctx.Log.Info(err)
+ }
+
+ w.WriteHeader(code)
+ _ = json.NewEncoder(w).Encode(nil)
+}
+
// VersionsHandler is an httpHandler for route GET /versions
func VersionsHandler(ctx lib.ReqContext, context echo.Context) {
// swagger:route GET /versions GetVersion
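
Since /ready is registered alongside /health as a public common route (see routes.go below), an orchestrator can poll it the same way it polls /health. A minimal probe sketch against a local node (host is a placeholder):

    package main

    import (
            "log"
            "net/http"
    )

    func main() {
            resp, err := http.Get("http://localhost:8080/ready")
            if err != nil {
                    log.Fatal(err)
            }
            defer resp.Body.Close()
            switch resp.StatusCode {
            case http.StatusOK: // 200: healthy and fully caught up
                    log.Print("ready")
            case http.StatusServiceUnavailable: // 503: still catching up
                    log.Print("not ready yet")
            default: // 500: status error or stopped at an unsupported round
                    log.Printf("unhealthy: %s", resp.Status)
            }
    }
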
diff --git a/daemon/algod/api/server/common/routes.go b/daemon/algod/api/server/common/routes.go
index 545d92bb2..1a78fe00a 100644
--- a/daemon/algod/api/server/common/routes.go
+++ b/daemon/algod/api/server/common/routes.go
@@ -42,6 +42,13 @@ var Routes = lib.Routes{
},
lib.Route{
+ Name: "ready",
+ Method: "GET",
+ Path: "/ready",
+ HandlerFunc: Ready,
+ },
+
+ lib.Route{
Name: "swagger.json",
Method: "GET",
Path: "/swagger.json",
diff --git a/daemon/algod/api/server/common/test/handlers_test.go b/daemon/algod/api/server/common/test/handlers_test.go
new file mode 100644
index 000000000..c602b5bde
--- /dev/null
+++ b/daemon/algod/api/server/common/test/handlers_test.go
@@ -0,0 +1,88 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/labstack/echo/v4"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/daemon/algod/api/server/common"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/lib"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/node"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func mockNodeStatusInRangeHelper(
+ t *testing.T, statusCode MockNodeCatchupStatus,
+ expectedErr error, expectedStatus node.StatusReport) {
+ mockNodeInstance := makeMockNode(statusCode)
+ status, err := mockNodeInstance.Status()
+ if expectedErr != nil {
+ require.Error(t, err, expectedErr)
+ } else {
+ require.Equal(t, expectedStatus, status)
+ }
+}
+
+func TestMockNodeStatus(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ mockNodeStatusInRangeHelper(
+ t, CaughtUpAndReady, nil, cannedStatusReportCaughtUpAndReadyGolden)
+ mockNodeStatusInRangeHelper(
+ t, CatchingUpFast, nil, cannedStatusReportCatchingUpFastGolden)
+ mockNodeStatusInRangeHelper(
+ t, StoppedAtUnsupported, nil, cannedStatusReportStoppedAtUnsupportedGolden)
+ mockNodeStatusInRangeHelper(
+ t, 399, fmt.Errorf("catchup status out of scope error"), node.StatusReport{})
+}
+
+func readyEndpointTestHelper(
+ t *testing.T, node *mockNode, expectedCode int) {
+ reqCtx := lib.ReqContext{
+ Node: node,
+ Log: logging.NewLogger(),
+ Shutdown: make(chan struct{}),
+ }
+
+ e := echo.New()
+ req := httptest.NewRequest(http.MethodGet, "/", nil)
+ rec := httptest.NewRecorder()
+ c := e.NewContext(req, rec)
+
+ common.Ready(reqCtx, c)
+ require.Equal(t, expectedCode, rec.Code)
+}
+
+func TestReadyEndpoint(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ mockNodeInstance := makeMockNode(CaughtUpAndReady)
+ readyEndpointTestHelper(t, mockNodeInstance, http.StatusOK)
+
+ mockNodeInstance.catchupStatus = CatchingUpFast
+ readyEndpointTestHelper(t, mockNodeInstance, http.StatusServiceUnavailable)
+
+ mockNodeInstance.catchupStatus = StoppedAtUnsupported
+ readyEndpointTestHelper(t, mockNodeInstance, http.StatusInternalServerError)
+}
diff --git a/daemon/algod/api/server/common/test/helpers.go b/daemon/algod/api/server/common/test/helpers.go
new file mode 100644
index 000000000..27d534ad5
--- /dev/null
+++ b/daemon/algod/api/server/common/test/helpers.go
@@ -0,0 +1,139 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "fmt"
+
+ "github.com/stretchr/testify/mock"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/node"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+var cannedStatusReportCaughtUpAndReadyGolden = node.StatusReport{
+ LastRound: basics.Round(1),
+ LastVersion: protocol.ConsensusCurrentVersion,
+ NextVersion: protocol.ConsensusCurrentVersion,
+ NextVersionRound: basics.Round(2),
+ NextVersionSupported: true,
+ StoppedAtUnsupportedRound: false,
+ Catchpoint: "",
+ CatchpointCatchupAcquiredBlocks: 0,
+ CatchpointCatchupProcessedAccounts: 0,
+ CatchpointCatchupVerifiedAccounts: 0,
+ CatchpointCatchupTotalAccounts: 0,
+ CatchpointCatchupTotalKVs: 0,
+ CatchpointCatchupProcessedKVs: 0,
+ CatchpointCatchupVerifiedKVs: 0,
+ CatchpointCatchupTotalBlocks: 0,
+ LastCatchpoint: "",
+ CatchupTime: 0,
+}
+
+var cannedStatusReportCatchingUpFastGolden = node.StatusReport{
+ LastRound: basics.Round(97000),
+ LastVersion: protocol.ConsensusCurrentVersion,
+ NextVersion: protocol.ConsensusCurrentVersion,
+ NextVersionRound: 200000,
+ NextVersionSupported: true,
+ StoppedAtUnsupportedRound: false,
+ Catchpoint: "5894690#DVFRZUYHEFKRLK5N6DNJRR4IABEVN2D6H76F3ZSEPIE6MKXMQWQA",
+ CatchpointCatchupAcquiredBlocks: 0,
+ CatchpointCatchupProcessedAccounts: 0,
+ CatchpointCatchupVerifiedAccounts: 0,
+ CatchpointCatchupTotalAccounts: 0,
+ CatchpointCatchupTotalKVs: 0,
+ CatchpointCatchupProcessedKVs: 0,
+ CatchpointCatchupVerifiedKVs: 0,
+ CatchpointCatchupTotalBlocks: 0,
+ LastCatchpoint: "",
+ UpgradePropose: "upgradePropose",
+ UpgradeApprove: false,
+ UpgradeDelay: 0,
+ NextProtocolVoteBefore: 100000,
+ NextProtocolApprovals: 5000,
+ CatchupTime: 10000,
+}
+
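+// cannedStatusReportStoppedAtUnsupportedGolden is the status report of a node halted at an unsupported protocol round.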
+var cannedStatusReportStoppedAtUnsupportedGolden = node.StatusReport{
+ LastRound: basics.Round(97000),
+ LastVersion: protocol.ConsensusCurrentVersion,
+ NextVersion: protocol.ConsensusCurrentVersion,
+ NextVersionRound: 200000,
+ NextVersionSupported: true,
+ StoppedAtUnsupportedRound: true,
+ Catchpoint: "",
+ CatchpointCatchupAcquiredBlocks: 0,
+ CatchpointCatchupProcessedAccounts: 0,
+ CatchpointCatchupVerifiedAccounts: 0,
+ CatchpointCatchupTotalAccounts: 0,
+ CatchpointCatchupTotalKVs: 0,
+ CatchpointCatchupProcessedKVs: 0,
+ CatchpointCatchupVerifiedKVs: 0,
+ CatchpointCatchupTotalBlocks: 0,
+ LastCatchpoint: "",
+ UpgradePropose: "upgradePropose",
+ UpgradeApprove: false,
+ UpgradeDelay: 0,
+ NextProtocolVoteBefore: 100000,
+ NextProtocolApprovals: 5000,
+ CatchupTime: 0,
+}
+
+// MockNodeCatchupStatus enumerates the possible catchup states of a mock node used in testing
+type MockNodeCatchupStatus uint
+
+const (
+ // CaughtUpAndReady means the mock node has caught up and is ready; /ready should return 200
+ CaughtUpAndReady MockNodeCatchupStatus = iota
+ // CatchingUpFast means the mock node is simulating an in-progress fast catchup; /ready should return 503
+ CatchingUpFast
+ // StoppedAtUnsupported means the mock node has stopped at an unsupported protocol round; /ready should return 500
+ StoppedAtUnsupported
+)
+
+// mockNode is the "node" used in common endpoint testing; it implements NodeInterface
+type mockNode struct {
+ mock.Mock
+ catchupStatus MockNodeCatchupStatus
+}
+
+// makeMockNode creates a mockNode with the given catchup status for testing the common /ready endpoint.
+func makeMockNode(catchupStatus MockNodeCatchupStatus) *mockNode {
+ return &mockNode{catchupStatus: catchupStatus}
+}
+
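+// Status returns the canned StatusReport matching the mock's configured catchup status; out-of-range values yield an error.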
+func (m *mockNode) Status() (s node.StatusReport, err error) {
+ switch m.catchupStatus {
+ case CaughtUpAndReady:
+ s = cannedStatusReportCaughtUpAndReadyGolden
+ case CatchingUpFast:
+ s = cannedStatusReportCatchingUpFastGolden
+ case StoppedAtUnsupported:
+ s = cannedStatusReportStoppedAtUnsupportedGolden
+ default:
+ err = fmt.Errorf("catchup status out of scope error")
+ }
+ return
+}
+
+func (m *mockNode) GenesisID() string { panic("not implemented") }
+
+func (m *mockNode) GenesisHash() crypto.Digest { panic("not implemented") }
diff --git a/daemon/algod/api/server/lib/common.go b/daemon/algod/api/server/lib/common.go
index a04d85a95..e44e624c1 100644
--- a/daemon/algod/api/server/lib/common.go
+++ b/daemon/algod/api/server/lib/common.go
@@ -23,6 +23,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/node"
)
// GenesisJSONText is initialized when the node starts.
@@ -32,6 +33,7 @@ var GenesisJSONText string
type NodeInterface interface {
GenesisHash() crypto.Digest
GenesisID() string
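+ // Status reports the node's current sync state; the common /ready handler uses it to decide readiness.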
+ Status() (s node.StatusReport, err error)
}
// HandlerFunc defines a wrapper for http.HandlerFunc that includes a context
@@ -57,8 +59,8 @@ type ReqContext struct {
Shutdown <-chan struct{}
}
-// ErrorResponse sets the specified status code (should != 200), and fills in the
-// a human readable error.
+// ErrorResponse sets the specified status code (should != 200), and fills in
+// a human-readable error.
func ErrorResponse(w http.ResponseWriter, status int, internalErr error, publicErr string, logger logging.Logger) {
logger.Info(internalErr)
diff --git a/daemon/algod/api/server/router.go b/daemon/algod/api/server/router.go
index 632e8cbb9..6797bd988 100644
--- a/daemon/algod/api/server/router.go
+++ b/daemon/algod/api/server/router.go
@@ -51,6 +51,8 @@ const (
apiV1Tag = "/v1"
// TokenHeader is the header where we put the token.
TokenHeader = "X-Algo-API-Token"
+ // maxRequestBodyBytes is the maximum request body size that we allow in our APIs.
+ maxRequestBodyBytes = "10MB"
)
// wrapCtx passes a common context to each request without a global variable.
@@ -90,7 +92,9 @@ func NewRouter(logger logging.Logger, node APINodeInterface, shutdown <-chan str
middleware.RemoveTrailingSlash())
e.Use(
middlewares.MakeLogger(logger),
- middlewares.MakeCORS(TokenHeader))
+ middlewares.MakeCORS(TokenHeader),
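+ // Cap request body sizes across all routes.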
+ middleware.BodyLimit(maxRequestBodyBytes),
+ )
// Request Context
ctx := lib.ReqContext{Node: node, Log: logger, Shutdown: shutdown}
diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go
index 49a5a427d..685ec1e68 100644
--- a/daemon/algod/api/server/v2/dryrun.go
+++ b/daemon/algod/api/server/v2/dryrun.go
@@ -249,6 +249,10 @@ func (dl *dryrunLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Ro
return nil
}
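+// GetStateProofVerificationContext satisfies the ledger interface; dryrun does not support state proof verification.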
+func (dl *dryrunLedger) GetStateProofVerificationContext(_ basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ return nil, fmt.Errorf("dryrunLedger: GetStateProofVerificationContext, needed for state proof verification, is not implemented in dryrun")
+}
+
func (dl *dryrunLedger) lookup(rnd basics.Round, addr basics.Address) (basics.AccountData, basics.Round, error) {
// check accounts from debug records uploaded
out := basics.AccountData{}
@@ -472,7 +476,7 @@ func doDryrunRequest(dr *DryrunRequest, response *model.DryrunResponse) {
}
}
if !found {
- (*acct.AppsLocalState) = append(*acct.AppsLocalState, ls)
+ *acct.AppsLocalState = append(*acct.AppsLocalState, ls)
}
}
dl.dr.Accounts[idx] = acct
diff --git a/daemon/algod/api/server/v2/dryrun_test.go b/daemon/algod/api/server/v2/dryrun_test.go
index fd74e9010..2f43b12a6 100644
--- a/daemon/algod/api/server/v2/dryrun_test.go
+++ b/daemon/algod/api/server/v2/dryrun_test.go
@@ -486,7 +486,7 @@ func TestDryrunGlobal2(t *testing.T) {
Txn: transactions.Transaction{
Type: protocol.ApplicationCallTx,
ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- ApplicationID: 1,
+ ApplicationID: 1234,
ApplicationArgs: [][]byte{
[]byte("check"),
[]byte("bar"),
@@ -503,7 +503,7 @@ func TestDryrunGlobal2(t *testing.T) {
}
dr.Apps = []model.Application{
{
- Id: 1,
+ Id: 1234,
Params: model.ApplicationParams{
ApprovalProgram: globalTestProgram,
GlobalState: &gkv,
@@ -691,7 +691,7 @@ func TestDryrunLocalCheck(t *testing.T) {
Txn: transactions.Transaction{
Type: protocol.ApplicationCallTx,
ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- ApplicationID: 1,
+ ApplicationID: 1234,
ApplicationArgs: [][]byte{
[]byte("check"),
[]byte("bar"),
@@ -702,13 +702,13 @@ func TestDryrunLocalCheck(t *testing.T) {
}
dr.Apps = []model.Application{
{
- Id: 1,
+ Id: 1234,
Params: model.ApplicationParams{
ApprovalProgram: localStateCheckProg,
},
},
}
localv := make(model.TealKeyValueStore, 1)
localv[0] = model.TealKeyValue{
Key: b64("foo"),
Value: model.TealValue{
@@ -722,7 +722,7 @@ func TestDryrunLocalCheck(t *testing.T) {
Status: "Online",
Address: basics.Address{}.String(),
AppsLocalState: &[]model.ApplicationLocalState{{
- Id: 1,
+ Id: 1234,
KeyValue: &localv,
}},
},
diff --git a/daemon/algod/api/server/v2/errors.go b/daemon/algod/api/server/v2/errors.go
index 947a38fa9..18a389edf 100644
--- a/daemon/algod/api/server/v2/errors.go
+++ b/daemon/algod/api/server/v2/errors.go
@@ -24,17 +24,16 @@ var (
errBoxDoesNotExist = "box not found"
errFailedLookingUpLedger = "failed to retrieve information from the ledger"
errFailedLookingUpTransactionPool = "failed to retrieve information from the transaction pool"
- errFailedRetrievingStateDelta = "failed retrieving State Delta"
+ errFailedRetrievingStateDelta = "failed retrieving State Delta: %v"
errFailedRetrievingNodeStatus = "failed retrieving node status"
- errFailedRetrievingLatestBlockHeaderStatus = "failed retrieving latests block header"
+ errFailedRetrievingLatestBlockHeaderStatus = "failed retrieving latest block header"
+ errFailedRetrievingTimeStampOffset = "failed retrieving timestamp offset from node: %v"
+ errFailedSettingTimeStampOffset = "failed to set timestamp offset on the node: %v"
errFailedRetrievingSyncRound = "failed retrieving sync round from ledger"
errFailedSettingSyncRound = "failed to set sync round on the ledger"
errFailedParsingFormatOption = "failed to parse the format option"
errFailedToParseAddress = "failed to parse the address"
errFailedToParseExclude = "failed to parse exclude"
- errFailedToParseTransaction = "failed to parse transaction"
- errFailedToParseBlock = "failed to parse block"
- errFailedToParseCert = "failed to parse cert"
errFailedToEncodeResponse = "failed to encode response"
errInternalFailure = "internal failure"
errNoValidTxnSpecified = "no valid transaction ID was specified"
@@ -48,4 +47,5 @@ var (
errOperationNotAvailableDuringCatchup = "operation not available during catchup"
errRESTPayloadZeroLength = "payload was of zero length"
errRoundGreaterThanTheLatest = "given round is greater than the latest round"
+ errFailedRetrievingTracer = "failed retrieving the expected tracer from ledger"
)
diff --git a/daemon/algod/api/server/v2/generated/data/routes.go b/daemon/algod/api/server/v2/generated/data/routes.go
index 455667080..7d2982705 100644
--- a/daemon/algod/api/server/v2/generated/data/routes.go
+++ b/daemon/algod/api/server/v2/generated/data/routes.go
@@ -21,9 +21,6 @@ import (
// ServerInterface represents all server handlers.
type ServerInterface interface {
- // Get a LedgerStateDelta object for a given round
- // (GET /v2/deltas/{round})
- GetLedgerStateDelta(ctx echo.Context, round uint64, params GetLedgerStateDeltaParams) error
// Removes minimum sync round restriction from the ledger.
// (DELETE /v2/ledger/sync)
UnsetSyncRound(ctx echo.Context) error
@@ -40,33 +37,6 @@ type ServerInterfaceWrapper struct {
Handler ServerInterface
}
-// GetLedgerStateDelta converts echo context to params.
-func (w *ServerInterfaceWrapper) GetLedgerStateDelta(ctx echo.Context) error {
- var err error
- // ------------- Path parameter "round" -------------
- var round uint64
-
- err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
- }
-
- ctx.Set(Api_keyScopes, []string{""})
-
- // Parameter object where we will unmarshal all parameters from the context
- var params GetLedgerStateDeltaParams
- // ------------- Optional query parameter "format" -------------
-
- err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
- if err != nil {
- return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
- }
-
- // Invoke the callback with all the unmarshalled arguments
- err = w.Handler.GetLedgerStateDelta(ctx, round, params)
- return err
-}
-
// UnsetSyncRound converts echo context to params.
func (w *ServerInterfaceWrapper) UnsetSyncRound(ctx echo.Context) error {
var err error
@@ -135,7 +105,6 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
Handler: si,
}
- router.GET(baseURL+"/v2/deltas/:round", wrapper.GetLedgerStateDelta, m...)
router.DELETE(baseURL+"/v2/ledger/sync", wrapper.UnsetSyncRound, m...)
router.GET(baseURL+"/v2/ledger/sync", wrapper.GetSyncRound, m...)
router.POST(baseURL+"/v2/ledger/sync/:round", wrapper.SetSyncRound, m...)
@@ -145,174 +114,180 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9a3PctpLoX0HNbpUfdyjJr5xYVam9ip3kaOM4LkvJubu2b4Ihe2ZwRAIMAI5m4qv/",
- "fgsNgARJkEM9Yidb55OtIdBoNBqNRr/wcZaKohQcuFaz44+zkkpagAaJf9E0FRXXCcvMXxmoVLJSM8Fn",
- "x/4bUVoyvprNZ8z8WlK9ns1nnBbQtDH95zMJv1VMQjY71rKC+UylayioAax3pWldQ9omK5E4ECcWxOnL",
- "2dXIB5plEpTqY/kjz3eE8TSvMiBaUq5oaj4pcsn0mug1U8R1JowTwYGIJdHrVmOyZJBn6sBP8rcK5C6Y",
- "pRt8eEpXDYqJFDn08XwhigXj4LGCGql6QYgWJIMlNlpTTcwIBlffUAuigMp0TZZC7kHVIhHiC7wqZsfv",
- "Zgp4BhJXKwW2wf8uJcDvkGgqV6BnH+axyS01yESzIjK1U0d9CarKtSLYFue4YhvgxPQ6ID9USpMFEMrJ",
- "229fkCdPnjw3Eymo1pA5JhucVTN6OCfbfXY8y6gG/7nPazRfCUl5ltTt3377Asc/cxOc2ooqBfHNcmK+",
- "kNOXQxPwHSMsxLiGFa5Di/tNj8imaH5ewFJImLgmtvGdLko4/mddlZTqdF0KxnVkXQh+JfZzVIYF3cdk",
- "WI1Aq31pKCUN0HdHyfMPHx/NHx1d/du7k+S/3Z/PnlxNnP6LGu4eCkQbppWUwNNdspJAcbesKe/T463j",
- "B7UWVZ6RNd3g4tMCRb3rS0xfKzo3NK8Mn7BUipN8JRShjo0yWNIq18QPTCqeGzFloDluJ0yRUooNyyCb",
- "G+l7uWbpmqRUWRDYjlyyPDc8WCnIhngtPruRzXQVksTgdSN64IT+vMRo5rWHErBFaZCkuVCQaLHnePIn",
- "DuUZCQ+U5qxS1zusyPkaCA5uPtjDFmnHDU/n+Y5oXNeMUEUo8UfTnLAl2YmKXOLi5OwC+7vZGKoVxBAN",
- "F6d1jprNO0S+HjEixFsIkQPlSDy/7/ok40u2qiQocrkGvXZnngRVCq6AiMU/IdVm2f/z7MfXREjyAyhF",
- "V/CGphcEeCoyyA7I6ZJwoQPWcLyENDQ9h+bh8Iod8v9UwvBEoVYlTS/iJ3rOChaZ1Q90y4qqILwqFiDN",
- "kvojRAsiQVeSDyFkIe5hxYJu+4Oey4qnuP7NsC1dznAbU2VOd0iwgm6/Opo7dBSheU5K4BnjK6K3fFCP",
- "M2PvRy+RouLZBDVHmzUNDlZVQsqWDDJSQxnBxA2zDx/Gr4dPo3wF6Hggg+jUo+xBh8M2wjNmd5svpKQr",
- "CFjmgPzkhBt+1eICeM3oZLHDT6WEDROVqjsN4IhDj2vgXGhISglLFuGxM0cOI2BsGyeBC6cDpYJryjhk",
- "Rjgj0kKDFVaDOAUDjt93+qf4gir44unQGd98nbj6S9Fd9dEVn7Ta2CixWzJydJqvbsPGNatW/wn3w3Bs",
- "xVaJ/bm3kGx1bk6bJcvxJPqnWT9PhkqhEGgRwp9Niq041ZWE4/f8ofmLJORMU55RmZlfCvvTD1Wu2Rlb",
- "mZ9y+9MrsWLpGVsNELPGNXrhwm6F/cfAi4tjvY3eK14JcVGV4YTS1sV1sSOnL4cW2cK8LmOe1Lfd8OJx",
- "vvWXkev20Nt6IQeQHKRdSU3DC9hJMNjSdIn/bJfIT3Qpfzf/lGVueutyGSOt4WN3JKP5wJkVTsoyZyk1",
- "RHzrPpuvRgiAvUjQpsUhHqjHHwMUSylKkJpZoLQsk1ykNE+Uphoh/buE5ex49m+Hjf3l0HZXh8Hgr0yv",
- "M+xkVFarBiW0LK8B441RfdSIsDACGj+hmLBiD5Umxu0iGlZiRgTnsKFcHzRXlpY8qDfwOzdSQ2+r7Vh6",
- "d65ggwQntuEClNWAbcN7igSkJ0hWgmRFhXSVi0X9w/2TsmwoiN9PytLSA7VHYKiYwZYprR7g9Gmzk8Jx",
- "Tl8ekO9C2KiKC57vzOFgVQ1zNizdqeVOsdq25ObQQLynCC6nkAdmaTwZjJp/FxyH14q1yI3Ws5dXTOO/",
- "u7Yhm5nfJ3X+a7BYSNth5sKLlqOcvePgL8Hl5n6Hc/qM48w9B+Sk2/dmbGOgxBnmRrwyup4W7ggdaxJe",
- "SlpaBN0Xe5Yyjpc028jiektpOlHQRXEO9nDAa4jVjffa3v0QxQRZoYPD17lIL/5O1foO9vzCw+pvPxyG",
- "rIFmIMmaqvXBLKZlhNurgTZli5mGeMEni2Cog3qKdzW9PVPLqKbB1By+cbXEkh77odADGbm7/Ij/oTkx",
- "n83eNqLfgj0g5yjAlN3OzsmQmdu+vSDYkUwDtEIIUtgLPjG37mth+aIZPL5Ok9boG2tTcCvkJoErJLZ3",
- "vg2+FtsYDl+LbW8LiC2ou+APAwfVSA2FmoDfS4eZwPV35KNS0l2fyAh7CpHNBI3qqnA38PDEN6M0xtmT",
- "hZA3kz4dscJJY3Im1EANhO+8QyRsWpWJY8WI2co26ABqvHzjQqMLPkaxFhXONP0DqKAM1LugQhvQXVNB",
- "FCXL4Q5Yfx0V+guq4Mljcvb3k2ePHv/y+NkXhiVLKVaSFmSx06DIfXc3I0rvcnjQnxnejqpcx6F/8dQb",
- "KttwY3CUqGQKBS37oKwB1KpAthkx7fpUa5MZZ10jOGVznoOR5JbsxNr2DWovmTIaVrG4k8UYIljWjJIR",
- "h0kGe5nputNrhtmFU5Q7Wd3FVRakFDJiX8MtpkUq8mQDUjER8aa8cS2Ia+HV27L7u8WWXFJFzNho+q04",
- "KhQRztJbPl3uW9DnW97QZlTy2/lGZufGnbIubeJ7S6IiJchEbznJYFGtWjehpRQFoSTDjnhGfwf6bMdT",
- "tKrdBZMOX9MKxtHEr3Y8De5sZqFyyFatRbj93axLFW+fs0PdUxF0DDle4We81r+EXNM711+6A8Rwf+EX",
- "0iJLMtMQb8Gv2GqtAwXzjRRiefc4xkaJIYofrHqemz59Jf21yMBMtlJ3cBg3wBpeN2sacjhdiEoTSrjI",
- "AC0qlYof0wOee3QZoqdThye/XluNewGGkVJamdlWJUE/Xk9yNB0TmlruTZA0asCLUbufbCs7nPUK5xJo",
- "Zm71wIlYOFeBc2LgJCk6IbU/6JySENlLLbxKKVJQCrLEmSj2oubbWSGiR+iEiCPC9ShECbKk8tbIXmz2",
- "4nkBuwRd5orc//5n9eAz4KuFpvkewmKbGHnrC5/zB/Wxnjb8GMN1Bw/ZjkogXuaa26UREDloGCLhtWgy",
- "uH5djHqreHuybECiZ+YP5Xg/yO0YqEb1D+b322JblQOBYO6ic84KtNtxyoWCVPBMRYHlVOlkn1g2jVq3",
- "MTODQBLGJDECHlBKXlGlrTeR8QyNIPY4wXGsgmKGGEZ4UCE1kH/2umgfdmrOQa4qVSumqipLITVksTlw",
- "2I6M9Rq29VhiGcCutV8tSKVgH+QhKgXwHbHsTCyBqK6N7s7d3p8cmqbNOb+LkrKFREOIMUTOfKuAumEw",
- "zAAiTDWEtozDVIdz6gic+UxpUZZGWuik4nW/ITKd2dYn+qembZ+5qG7O7UyAwhgc195hfmkpa8Og1tRc",
- "oREyKeiF0T3wQmzdnn2czWZMFOMpJGOcb7blmWkVboG9m7QqV5JmkGSQ010f6E/2M7GfxwDgijcXH6Eh",
- "sfEs8UVvONmHD4yAFghPxZRHgl9IaraguXk0DOJ674GcAcKOCSfHR/dqUDhWdIk8PJy2XeoIRDwNN0Kb",
- "FbfsgBg7gT4F3wEy1JBvTgnsnDTXsu4Q/wXKDVCrEdcfZAdqaAoN/GtNYMCY5iKFg+3Ske4dARyVmoNS",
- "bI8YGdqxA5a9N1RqlrISrzrfw+7Ob37dAaL+JpKBpiyHjAQf7C2wDPsTG4jRhXmzm+AkI0wf/Z4VJjKd",
- "nCnUeNrIX8AOr9xvbITfeRAXeAdX2QhUczxRThBRHzdkNPCwCWxpqvOd0dP0GnbkEiQQVS0KprWN3G3f",
- "dLUokxBA1MA9MqLz5tjoOL8CU9xLZwgqmF5/KeYzeyUYx++8cy9okcNdBUoh8gnGox4xohhMcvyTUphV",
- "Zy6I2IeRek5qIemENrry6tP/nmqRGWdA/ktUJKUcb1yVhlqlERL1BNQfzQhGA6vHdC7+hkKQQwH2Iolf",
- "Hj7sTvzhQ7fmTJElXPrIe9OwS46HD9GM80Yo3dpcd2AqNNvtNHJ8oOUfzz0XvNCRKftdzA7ylJV80wFe",
- "uwvMnlLKMa6Z/q0FQGdnbqfMPeSRae51hDvJqB+Ajs0b1/2MFVVO9V24L0b10fo+wYoCMkY15DtSSkjB",
- "RlcbBUtZXAxqxMZdpWvKV6hXS1GtXOCPhYOCsVLWgiEr3gMRVT70licrKaoyJihdsKcPsDdqB1Bz8wkI",
- "iZ2tnn9J6/FcTsWUE8wTPFid7wzMIa/CfDZ4MTRE3TQXQ0ucdpZAnAqY9pCoKk0BoiHAsStXPdVONmST",
- "3+IAGrWhkjYGitBUVzQPuY6cLgnlu3aaJGW5MlKQKYLtTOcmrnZu5+ZzWJY0t77ZSFJFuFNaGl+w8g1J",
- "u6SY6HdAJjHaUJ8zQgY028uw8R9jw29Ax7DsDxwEXTUfh+KuzP07392BGmQBEQmlBIWHVmi3UvarWIa5",
- "T+5UUzuloeib9m3XXwYEzdvBC6TgOeOQFILDLpruyzj8gB+jggMPzoHOqMIM9e3eSlr4d9BqjzOFG29L",
- "X1ztQBa9qQMO72Dxu3A7Xp0w6wutlpCXhJI0Z2jTFFxpWaX6PadoNQk2WyQww98Ph+1oL3yTuOEuYldz",
- "oN5zikE5tS0l6kxeQsRw8C2AN6eparUC1ZGfZAnwnrtWjJOKM41jFWa9ErtgJUiMjjiwLQu6MyIQzX6/",
- "gxRkUem2TMbME6WNuLQuJjMMEcv3nGqSg7lT/8D4+RbBeRet5xkO+lLIi5oK8SNkBRwUU0k8gOQ7+xVj",
- "+9z01y7ODzOF7WfrlDDwm/SUHRpVmuzX/3v/P47fnST/TZPfj5Ln/+vww8enVw8e9n58fPXVV/+v/dOT",
- "q68e/Me/x1bK4x7Li3CYn750l7XTl6iRN16JHu6fzCJdMJ5EmSz0vXd4i9zHHEDHQA/a9hq9hvdcb7lh",
- "pA3NWWZUrpuwQ1fE9fai3R0drmktRMc+4+d6TT33FlKGRIRMRzTe+Bjvx1zFM5DQTeaSinC/LCtul9Ir",
- "ujbA3se+iOW8zjKzBSiOCaYgrakP3HJ/Pn72xWzepA7V32fzmfv6IcLJLNtGtUPYxq4vboPgxrinSEl3",
- "CgYUUMQ9GuZjow1CsAWYe69as/LTSwql2SIu4XzYsjODbPkpt/HEZv+g023nbPli+enx1tLo4aVexxLT",
- "W5oCtmpWE6ATCFFKsQE+J+wADrpmiMxczVzAUQ50iQnSeNETU9Iw6n1gGc1zRUD1cCKT7vox/kHl1knr",
- "q/nMHf7qzvVxBziGV3fM2sPm/9aC3Pvum3Ny6ASmumdzFS3oILsscmt1CRStEBkjzWw5Dpus+Z6/5y9h",
- "yTgz34/f84xqerigiqXqsFIgv6Y55SkcrAQ59jkZL6mm73lP0xqsmBNkw5CyWuQsJRehRtywp62C0Ifw",
- "/v07mq/E+/cfetECff3VDRWVL3aA5JLptah04nK4EwmXVMa8MarO4UXItkjD2Khz4mBbUexyxB38uMyj",
- "Zam6uXz96ZdlbqYfsKFymWpmyYjSQnpdxCgoFhtc39fCHQySXnoTRqVAkV8LWr5jXH8gyfvq6OgJkFZy",
- "26/uyDc8uSthsiFjMNewa7/Aidt7DWy1pElJVzGvz/v37zTQElcf9eUCL9l5TrBbK6nOBw0jqGYCnh7D",
- "C2DxuHaCEE7uzPby9XriU8BPuITYxqgbjSv6pusVpNndeLk6qXq9Var0OjF7OzorZVjcr0xdxmNllCwf",
- "H6DYCmMwXcWTBZB0DemFK0UBRal381Z3H4LiFE0vOpiyRUpskgymyaPNfAGkKjPqVPGuBWmxIwq09kGg",
- "b+ECdueiybK/ToJyO19WDW1U5NRAuzTMGm5bB6O7+C7OCU1cZenTTjH/yLPFcc0Xvs/wRrYq7x1s4hhT",
- "tPI5hwhBZYQQlvkHSHCDiRp4t2L92PTMLWNhT75IwRIv+4lr0lyeXEhSOBs0cNvvBWDFI3GpyIIavV24",
- "Yj02JzSQYpWiKxjQkEO3xcTMy5arA4HsO/eiJ51Ydg+03nkTRdk2Tsyco5wC5othFbzMdALR/EjWM+ac",
- "AFiDzxFskaOaVEfsWaFDZct9ZIuKDaEWZ2CQvFE4PBptioSazZoqX0cIyy35vTxJB/gDc5zHKluEBv2g",
- "plJtX/cyt7tPe7dLV9/CF7XwlSzCq+WEqhRGw8ew7dhyCI4KUAY5rOzEbWPPKE2+dbNABo8fl8uccSBJ",
- "LByLKiVSZgtBNceMGwOMfvyQEGsCJpMhxNg4QBs9vgiYvBbh3uSr6yDJXb449bDRVxz8DfHUFhugbFQe",
- "URoRzgYcSKmXANTF8NXnVyeSFMEQxufEiLkNzY2Ycze+BkivwAKqrZ1yCi7m4MGQOjtigbcHy7XmZI+i",
- "m8wm1Jk80nGFbgTjhdgmNrctqvEutgvD79GYbcy0i21MW8riniILscU4FjxabIzwHlyG8fBoBDf8LVPI",
- "r9hv6DS3yIwNO65NxbhQIcs4c17NLkPqxJShBzSYIXa5H1SnuBECHWNHU+rVXX73XlLb6kn/MG9OtXlT",
- "dcmnw8S2/9AWiq7SAP36Vpi6nsSbrsYStVO0wzHapTQCFTLG9EZM9J00fVeQghzwUpC0lKjkIua6M3cb",
- "wBPnzHcLjBdYsIPy3YMgxkfCiikNjRHdhyR8DvMkxTphQiyHZ6dLuTTzeytEfUzZQjTYsTXNTz4DjJFd",
- "Mql0gh6I6BRMo28VXqq/NU3julI7ishW1WRZXDbgsBewSzKWV3F+deN+/9IM+7oWiapaoLxl3MaGLLAK",
- "bDS2cGRoG346OuFXdsKv6J3Nd9puME3NwNKwS3uMv8i+6EjeMXEQYcAYc/RXbZCkIwIySAntS8dAb7Kb",
- "E1NCD8asr73NlHnYe8NGfGLq0BllIUXnEhgMRmfB0E1k1BKmgyKq/VzNgT1Ay5Jl244t1EIdvDHTaxk8",
- "fOmpDhVwdR2wPRQI7J6xdBEJql1lrFHwbTncVpGPg0mUOW/XAgsFQjgUU76Ye59QdTrZPlqdA82/h93P",
- "pi1OZ3Y1n93OdBqjtYO4h9Zv6uWN0hld89aU1vKEXJPktCyl2NA8cQbmIdaUYuNYE5t7e/QnFnVxM+b5",
- "Nyev3jj0r+azNAcqk1pVGJwVtiv/MrOyBc0GNogvFm3ufF5nt6pksPh1FabQKH25Bld1N9BGe+UBG4dD",
- "sBWdkXoZjxDaa3J2vhE7xREfCZS1i6Qx31kPSdsrQjeU5d5u5rEdiObByU2rMRmVCiGAW3tXAidZcqfi",
- "pre747uj4a49Mikca6QucGFLXysieNeFjuHFu9J53QuKxf2sVaQvnHhVoCUhUTlL4zZWvlCGObj1nZnG",
- "BBsPKKMGYsUGXLG8YgEs00xNuOh2kAzGiBLTF4ocot1CuGdNKs5+q4CwDLg2nyTuys5GxWqKztreP06N",
- "7tAfywG2FvoG/G10jLCwZffEQyTGFYzQU9dD92V9ZfYTrS1SGG7duCSu4fAPR+wdiSPOescfjptt8OK6",
- "7XELXyHpyz/DGLYc9f4nUPzl1VXYHBgj+qQJU8lSit8hfs/D63EkFceX8mQY5fI78Akx5411p3mZpRl9",
- "cLmHtJvQCtUOUhjgelz5wC2HNQW9hZpyu9T2hYFWrFucYcKo0kMLv2EYh3MvEjenlwsaK7holAyD00nj",
- "AG7Z0rUgvrOnvaoTG+zoJPAl122ZzbIuQTZZcv2KLTdUGOywk1WFRjNArg11grn1/+VKRMBU/JJy+1CF",
- "6We3kuutwBq/TK9LIbFGgoqb/TNIWUHzuOaQpX0Tb8ZWzL7BUCkIivw7QPZ9G8tF7qGEOl3HkeZ0SY7m",
- "wUsjbjUytmGKLXLAFo9siwVVKMlrQ1TdxUwPuF4rbP54QvN1xTMJmV4rS1glSK3U4fWmdl4tQF8CcHKE",
- "7R49J/fRbafYBh4YKrrzeXb86DkaXe0fR7EDwL2hMSZNMhQn/3DiJM7H6Le0MIzgdlAPounk9hGtYcE1",
- "spts1yl7CVs6Wbd/LxWU0xXEI0WKPTjZvriaaEjr0IVn9gUYpaXYEabj44OmRj4NRJ8b8WfRIKkoCqYL",
- "59xRojD81FTwt4N6cPY5GVd81ePlP6KPtPQuos4l8tMaTe35Fps1erJf0wLaZJ0Tagtj5KyJXvAlocmp",
- "r7uD1WjrIrSWNmYsM3VUczCYYUlKybjGi0Wll8mXJF1TSVMj/g6G0E0WXzyNVOBtV4Lk10P8k9NdggK5",
- "iZNeDrC91yFcX3KfC54URqJkD5psj2BXDjpz4267Id/hOOipSpmBkgyyW9ViNxpI6lsxHh8BeEtWrOdz",
- "LX689sw+OWdWMs4etDIr9NPbV07LKISMFdNrtrvTOCRoyWCDsXvxRTIwb7kWMp+0CrfB/vN6HrzKGahl",
- "fi/HLgJfi8jt1FeFri3pLlY9Yh0Y2qbmg2GDhQM1J+0KvJ/e6eeNz33nk/niccU/ush+5iVFIvsZDCxi",
- "UB08upxZ/T3wf1PytdhOXdTODvEL+ycgTZQkFcuzn5uszE7xdUl5uo76sxam4y/NM1H15Oz5FK1Zt6ac",
- "Qx4FZ3XBX7zOGNFq/ymmjlMwPrFttx68nW5ncg3ibTQ9Un5AQ16mczNASNV2wlsdUJ2vREZwnKZAWiM9",
- "++8IBNWef6tA6VjyEH6wQV1otzT3XVtsmADP8LZ4QL6zL8GugbTK3+Atra4i4ErfWoN6VeaCZnMs5HD+",
- "zckrYke1fexjJ7bY8QovKe1ZdOxVQe3HaeHB/t2SeOrCdDjjsdRm1kpjNSqlaVHGkkNNi3PfADNQQxs+",
- "Xl9C6hyQl8GbjjaP1IAw/LBksjA3rhqa1V2QJ8x/tKbpGq9kLZE6zPLTq3R7rlTBy3j1Czd1QUTcdwZv",
- "V6jb1umeE2HuzZdM2QdAYQPtfNQ6OduZBHx+ant6suLcckpU9xgrHnATsnvkbKCGN/NHMesQ/poKuS1y",
- "f92i5WfYK1qgqVsBvfckns1urF8u8Q87p5QLzlIsjxQ7mt1LoVN8YBMqSXWNrH6Lux0a2VzRuut1mJyj",
- "4mAldi8IHeH6Rvjgq1lUyx32T41PUq6pJivQykk2yOb++QBnB2RcgStwie/KBnJSyJZfESVk1FWd1C6N",
- "a7IRpsUMXOy+Nd9eu2s/xotfMI4KviObC023ljp8yFCbWwHTZCVAufm0c4PVO9PnANNkM9h+OPAPH9pq",
- "MOiWM9O2Pug+qBPvkXYeYNP2hWnr6gTVP7cikO2gJ2XpBh1+XCKqD+gtHyRwxLOYeNdOQNwafghthN1G",
- "Q0nwPDWMBht0REOJ53CPMeqHFjqP+Bil1XIUtiA2hCtawYDxCBqvGIfmWc7IAZFGjwRcGNyvA/1UKqm2",
- "KuAkmXYONEfvc0ygKe1cD7cF1a0lZEiCc/RjDC9j80bEgOCoGzSKG+W7+jVQw92BMvECnyF2hOy/+IBa",
- "lVOiMswo6LwBERMcRnD7V2baB0B/G/R1IttdS2p3znVOoqEk0UWVrUAnNMtiFam+xq8Ev/riUrCFtKoL",
- "U5YlSbEmSrtITJ/b3ECp4KoqRsbyDW45XPCoSoQbwodd/ApjEspih//GqjIOr4wLwrh2GKCPuHCvUFxT",
- "b25D6mm9hqcTxVbJdErgmXJ7cjRD34zRm/53yum5WLUR+cSlIcakXLhGMfn2jTk4wsoJvVKj9mipCxtg",
- "0J3wT+HhtbFOyW1LJTzKerVH0dlTP7U1boAYfjRrjoffQOhtUBCD2vPVeg+HAnDTwXhxql3mmqZkVAQN",
- "ZgPZ6B2b94NYxC2nQxE7NmDHfO71nqYZ9vRshD1KUB8K1kfoex9nSkrKnGu8ERZ9yrqI9GFz4dimaxa4",
- "OwkX5z1osft+MxSTTRTjqxwIfu8+M3QBLp29fmfeztVHJfkrof3VPfNq4dVR8dH596MTcKjPawYdNNqe",
- "u5L2dpruTv79zzaGjQDXcvcnMOH2Fr33SFNf27XmqaYJqcshTyqP3DoV4+8tDdc/amoeIT+VQrGmBHfs",
- "IaaJsW7n+JZSUL+pD8sHmmwg1Vh3vXGgS4DrVHMygwWP/P2rDtLA3bEOCXTlj8ZqHvWLre850HppSUFq",
- "nS1UfTC9ws9JHSaFQgkr4K6Au3f22gkHk8Oel0tINdvsSQP7xxp4kGI090YI+15ukBXG6jBarCJyfRNb",
- "g9BYltYoPkE1v1ujM5QEcgG7e4q0uCFaOXvuz5WbFJBACqB0SAyLCBULQ7BWU+cZZqrmDKSCD/ux3aEp",
- "xTX45k6Q1HjDsTxLmhO3SXQcGTL+6MeksUzXa6X/YkToUKZY/9GAYWX7Jb7RoOr38HwBivBKSk77Zfou",
- "XQELTNqrHQW+lAUo/5vP0LWj5OwCwleB0C1zSWXmW0TtDN6EkYycR730Ll/wvov0sh6ZNUGa/YSeSOEn",
- "DMVNc2H0r2QonrkdFxk+no/RH7bkN0Z8GryWIN3raajs5UJBooUP6hzDY4wU7qH3mxBBDRZbtMgNlkB5",
- "29R4waKzFEueUBfZEk6QSCiowU4GlViGxxwj9gv73Wew+KKje80pNb/uLzTvw3OZ6hEx5Polcafl/syY",
- "m1hWGOf2rVYVK8vCDSlD038pRVal9oAON0ZtfZpc9GhElESNEml/lr37ZY4lwF4FeYYXsDu0qr8v1e+X",
- "MsTeqlB2DkFef2e179ToFL9f5ys7gdWd4Pk5DTfzWSlEngzY+k/71WW6e+CCpReQEXN2+MC2gWdLyH00",
- "MdfO3Mv1zldTKUvgkD04IOSE21Bi79dtlzfuDM7v6bHxtzhqVtmCT86mdPCex2MysRSTvKV882DGpZoC",
- "I/xuOZQFsqd2yXagso2kl5FHfA6mXkr7ntbuwyoNU1ksYlrKnicsIl5k/yaCf2HDZ6xoUbC0/4pCT5VY",
- "4mtUCY0AP60F+Lz1ViDrPNzhawzZZxpSahU4c3mgLK8kuMwB+2xOp5x+SfXaL59p3lezzJENCsP6bUl2",
- "quylwF9O3Js93X0hyiSHDbQcCS6doUpTUIptIHzvx3YmGUCJV/XuARKzkId81ZEhbu5JYGOdQt2oULGE",
- "tStF9kiMgcfYE8seaioLGYw2LKtoi37qFk+xTHzbPcR14g659uaIT663NdxzKUldzC1myHTpJH4JDf82",
- "T7t0FKTgCZYa5sBblDUVbqOMDJI2TtmbFdGYxA99m3ZkywTProxbXsIaO03wrrSuEbyp+V3XXdIfmt04",
- "7QEY32EPeqFBLngCxmtCDp3PHGH7Q02UYCqDnNCa/j4bn5tgI76CJbKy20zTVjyz0VntdQkMuOpFbRcd",
- "epepaz7FgjqCY5GxvtlVoasMa5WHjGNkt9zQ/NObTrHS0gnSw71zG59oaHsLiWxJqW4W5vaKTho7sLPd",
- "3dD8DZp6/wFmjaI+TgfK+TxqXcF7hlBk0pzkonnhDkGSS4RpnaKPviALl6JTSkiZYp3sxUtfRrk2NeGr",
- "As3zx+O2rX3z/FnoW7Dx0qsv5HVTklULPDEaDJst+pmFysDOjXJ5jPt6bBGhX0xGhbUy9hwXFy1vqS1x",
- "3QkDFBLu2GsaxD9d02varwIydXrWM2gOnUpBf56TT+sWbSMHdTO3qS7/PnHH6nZO8dTHy/Ga7hgqYAmC",
- "tawJokp+ffQrkbDEx2oEefgQB3j4cO6a/vq4/dls54cP488sf6ogAUsjB8ONG+OYn4fCxm1o9ECGQmc9",
- "KpZn+xijlW/SPPeEGRW/uIyzz/Lg1C/Wl9Pfqu7Rj+uEJ3UXAQkTmWtr8GCoIJNkQhKJ6xZJGUGrSFpJ",
- "pndYCMeb/tkv0XCG72pvofM216UT3NmnxQXUpZQa32Kl/On6naA5nkdGp8bgMI1P636zpUWZg9soX91b",
- "/A2efPk0O3ry6G+LL4+eHaXw9NnzoyP6/Cl99PzJI3j85bOnR/Bo+cXzxePs8dPHi6ePn37x7Hn65Omj",
- "xdMvnv/tnpFDBmWL6MynXc/+D77Klpy8OU3ODbINTWjJ6he1DRv7p2VoijsRCsry2bH/6X/7HXaQiqIB",
- "73+duazO2VrrUh0fHl5eXh6EXQ5X6ExItKjS9aEfp/+S8ZvTOjPHXi1xRW3ShTcZeFY4wW9vvzk7Jydv",
- "Tg+ClzKPZ0cHRweP8CHFEjgt2ex49gR/wt2zxnU/dMw2O/54NZ8droHm6Hs3fxSgJUv9J3VJVyuQB+6N",
- "HfPT5vGhVyUOPzpHytXYt8OwXPXhx5a/KdvTE8vZHn70VVrGW7fKoDg/W9BhIhZjzQ4XmPw5tSmooPHw",
- "VPCCoQ4/ooo8+Puhy4iLf8Srit0Dh94pG2/ZotJHvTW4dnq4J/kPP+J/kCcDtGz8aYDubBUrDvUdaJ88",
- "Znu46Ko6rKrm7dPMNu9F+7j6S7Yg5fG7afX4wQ9nbooZKOaKdKGUMFug2cQ+0aQR0VpWEBZPHCszcjWP",
- "PF25ZKtKdp7k7Tz2S5gi/3n242siJHF34jc0vajjrsjp0hb3kGLDMCUlC/KYTM96Or9VIHfNfNxxGU7A",
- "F/x3iT6FWpXtqPhaFf+AlRMQURQSj4+O7uzRrd7KXtkI9Rqcx+s2EHsy9YU/KVtsaGTj06NHdza3drDt",
- "rSfWBdeb1SnHWBcj+Yk92XBCT/+yE3qBN2jD8UvGM/tigqa4p+0Gxfl9+Zedn2aF96BxfHwGFCoAz+5w",
- "g316JjR6OM0JtrSzefKXnc0ZyA1LgZxDUQpJJct35CdeZ5cGFaX6x9xP/IKLS+4JYRTtqiio3LkjkJKu",
- "qAofrm+9/2c0PLpS6ODDGt6z+cyljsG2BMkK4FiV4sqdxK1f6/PZSrtD+6h0/+cddwlgOcTCqX7iCqzZ",
- "1Wd+73g6dGhj47MdT9/WJ2nvBMHd+8dxen8xa3xRpmC8zR8s8qfJ6GefkgqfdoN+sh31FgqxAVW/o9ww",
- "p9GyzM3FPqksRRHw8MHgzvqA1/649uq8Ef2RvCemAd5TZffsiZu+STwSTTUJzz3hjxb8lCdc6ydSO+kW",
- "dqh7sQWa/UsQ/EsQ3KEg0JXkg1s0OL8wJBhKG+1EUpquYUwe9E/L8KZbili9lbMRYeGqTAzJirO2rPhT",
- "33c//CnO9xeU+/3cWnEbg0ZlzkDWXEB5v/DHv6TA/xgpYCsYOZvSnGjIcxXufS1w79tQApfpwW2Ix0Q5",
- "0H3gM/bz4cf2AzMt455aVzoTl0FfdAjbaIa+za9+crH19+ElZTpZCumyPLAAcL+zBpofuvolnV+blOHe",
- "F8yDDn4MQ7Sivx7W9dWjH7uG19hXZ3gcaOSrT/nPjeMldGSghKxdGO8+GPmE1Tud8Gzs8seHhxg5vRZK",
- "H86u5h87Nvvw44eaJXxZt1kp2QazxD9c/f8AAAD//8pjb+JPxQAA",
+ "H4sIAAAAAAAC/+x9+5PbNtLgv4LS91X5ceLIr2TXU5X6bmIn2bk4icszyd63ti+ByJaEHRLgAqBGis//",
+ "+xUaAAmSIEXNTOzdq/3JHhGPRqPR6Be6P8xSUZSCA9dqdvphVlJJC9Ag8S+apqLiOmGZ+SsDlUpWaib4",
+ "7NR/I0pLxtez+YyZX0uqN7P5jNMCmjam/3wm4R8Vk5DNTrWsYD5T6QYKagbW+9K0rkfaJWuRuCHO7BDn",
+ "L2cfRz7QLJOgVB/Kn3i+J4yneZUB0ZJyRVPzSZFrpjdEb5girjNhnAgORKyI3rQakxWDPFMnfpH/qEDu",
+ "g1W6yYeX9LEBMZEihz6cL0SxZBw8VFADVW8I0YJksMJGG6qJmcHA6htqQRRQmW7ISsgDoFogQniBV8Xs",
+ "9O1MAc9A4m6lwLb435UE+B0STeUa9Oz9PLa4lQaZaFZElnbusC9BVblWBNviGtdsC5yYXifkh0ppsgRC",
+ "OXnz7Qvy9OnT52YhBdUaMkdkg6tqZg/XZLvPTmcZ1eA/92mN5mshKc+Suv2bb1/g/BdugVNbUaUgfljO",
+ "zBdy/nJoAb5jhIQY17DGfWhRv+kRORTNz0tYCQkT98Q2vtNNCef/rLuSUp1uSsG4juwLwa/Efo7ysKD7",
+ "GA+rAWi1Lw2mpBn07aPk+fsPj+ePH338j7dnyd/cn188/Thx+S/qcQ9gINowraQEnu6TtQSKp2VDeR8f",
+ "bxw9qI2o8oxs6BY3nxbI6l1fYvpa1rmleWXohKVSnOVroQh1ZJTBila5Jn5iUvHcsCkzmqN2whQppdiy",
+ "DLK54b7XG5ZuSEqVHQLbkWuW54YGKwXZEK3FVzdymD6GKDFw3QgfuKB/XmQ06zqACdghN0jSXChItDhw",
+ "Pfkbh/KMhBdKc1ep4y4rcrkBgpObD/ayRdxxQ9N5vica9zUjVBFK/NU0J2xF9qIi17g5ObvC/m41BmsF",
+ "MUjDzWndo+bwDqGvh4wI8pZC5EA5Is+fuz7K+IqtKwmKXG9Ab9ydJ0GVgisgYvl3SLXZ9v918dOPREjy",
+ "AyhF1/CaplcEeCoyyE7I+YpwoQPScLSEODQ9h9bh4Ipd8n9XwtBEodYlTa/iN3rOChZZ1Q90x4qqILwq",
+ "liDNlvorRAsiQVeSDwFkRzxAigXd9Se9lBVPcf+baVuynKE2psqc7hFhBd199WjuwFGE5jkpgWeMr4ne",
+ "8UE5zsx9GLxEiopnE8QcbfY0uFhVCSlbMchIPcoIJG6aQ/Awfhw8jfAVgOMHGQSnnuUAOBx2EZoxp9t8",
+ "ISVdQ0AyJ+Rnx9zwqxZXwGtCJ8s9fiolbJmoVN1pAEacelwC50JDUkpYsQiNXTh0GAZj2zgOXDgZKBVc",
+ "U8YhM8wZgRYaLLMahCmYcFzf6d/iS6rgy2dDd3zzdeLur0R310d3fNJuY6PEHsnI1Wm+ugMbl6xa/Sfo",
+ "h+Hciq0T+3NvI9n60tw2K5bjTfR3s38eDZVCJtBChL+bFFtzqisJp+/4Q/MXSciFpjyjMjO/FPanH6pc",
+ "swu2Nj/l9qdXYs3SC7YeQGYNa1Thwm6F/ceMF2fHehfVK14JcVWV4YLSluK63JPzl0ObbMc8ljDPam03",
+ "VDwud14ZObaH3tUbOQDkIO5KahpewV6CgZamK/xnt0J6oiv5u/mnLHPTW5erGGoNHbsrGc0HzqxwVpY5",
+ "S6lB4hv32Xw1TACsIkGbFgu8UE8/BCCWUpQgNbOD0rJMcpHSPFGaahzpPyWsZqez/1g09peF7a4WweSv",
+ "TK8L7GREVisGJbQsjxjjtRF91AizMAwaPyGbsGwPhSbG7SYaUmKGBeewpVyfNCpLix/UB/itm6nBt5V2",
+ "LL47KtggwoltuARlJWDb8J4iAeoJopUgWlEgXediWf9w/6wsGwzi97OytPhA6REYCmawY0qrB7h82pyk",
+ "cJ7zlyfku3BsFMUFz/fmcrCihrkbVu7WcrdYbVtya2hGvKcIbqeQJ2ZrPBqMmH8XFIdqxUbkRuo5SCum",
+ "8V9c25DMzO+TOv9rkFiI22HiQkXLYc7qOPhLoNzc71BOn3CcueeEnHX73oxszChxgrkRrYzupx13BI81",
+ "Cq8lLS2A7ou9SxlHJc02srDekptOZHRRmIMzHNAaQnXjs3bwPEQhQVLowPB1LtKrv1C1uYMzv/Rj9Y8f",
+ "TkM2QDOQZEPV5mQWkzLC49WMNuWImYao4JNlMNVJvcS7Wt6BpWVU02BpDt64WGJRj/2Q6YGM6C4/4X9o",
+ "Tsxnc7YN67fDnpBLZGDKHmfnZMiMtm8VBDuTaYBWCEEKq+ATo3UfBeWLZvL4Pk3ao2+sTcHtkFsE7pDY",
+ "3fkx+FrsYjB8LXa9IyB2oO6CPsw4KEZqKNQE+F46yATuv0MflZLu+0jGsacg2SzQiK4KTwMPb3wzS2Oc",
+ "PVsKeTPu02ErnDQmZ0LNqAHznXeQhE2rMnGkGDFb2QadgRov3zjT6A4fw1gLCxea/gFYUGbUu8BCe6C7",
+ "xoIoSpbDHZD+Jsr0l1TB0yfk4i9nXzx+8uuTL740JFlKsZa0IMu9BkXuO92MKL3P4UF/ZagdVbmOj/7l",
+ "M2+obI8bG0eJSqZQ0LI/lDWAWhHINiOmXR9rbTTjqmsApxzOSzCc3KKdWNu+Ae0lU0bCKpZ3shlDCMua",
+ "WTLiIMngIDEdu7xmmn24RLmX1V2osiClkBH7Gh4xLVKRJ1uQiomIN+W1a0FcCy/elt3fLbTkmipi5kbT",
+ "b8VRoIhQlt7x6XzfDn254w1uRjm/XW9kdW7eKfvSRr63JCpSgkz0jpMMltW6pQmtpCgIJRl2xDv6O9Ao",
+ "ClyyAi40LcqfVqu7URUFDhRR2VgBysxEbAsj1ytIBbeREAe0MzfqFPR0EeNNdHoYAIeRiz1P0c54F8d2",
+ "WHEtGEenh9rzNNBiDYw5ZOsWWd5eWx1Ch53qnoqAY9DxCj+joeMl5Jp+K+RlYwn8ToqqvHMhrzvn1OVQ",
+ "txhnSslMX69DM77O29E3awP7SWyNn2VBL/zxdWtA6JEiX7H1RgdqxWspxOruYYzNEgMUP1ilLDd9+qrZ",
+ "jyIzzERX6g5EsGawhsMZug35Gl2KShNKuMgAN79SceFsIF4DHcXo39ahvKc3Vs9agqGulFZmtVVJ0Hvb",
+ "uy+ajglN7QlNEDVqwHdVOx1tKzudjQXIJdBsT5YAnIilcxA51xUukqLrWXvxxomGEX7RgquUIgWlIEuc",
+ "YeogaL6dvTr0CJ4QcAS4noUoQVZU3hrYq+1BOK9gn2CghCL3v/9FPfgM8GqhaX4Asdgmht5azXdewD7U",
+ "06YfI7ju5CHZUQnE3ytEC5Rmc9AwhMKjcDK4f12Iert4e7RsQaI/7g+leD/J7QioBvUPpvfbQluVA+F/",
+ "Tr01Ep7ZME658IJVbLCcKp0cYsumUUsHNysIOGGME+PAA4LXK6q09SEznqHpy14nOI8VwswUwwAPqiFm",
+ "5F+8BtIfOzX3IFeVqtURVZWlkBqy2Bo47Ebm+hF29VxiFYxd6zxakErBoZGHsBSM75BlV2IRRHXtanFB",
+ "Fv3FoUPC3PP7KCpbQDSIGAPkwrcKsBuGQA0AwlSDaEs4THUop467ms+UFmVpuIVOKl73G0LThW19pn9u",
+ "2vaJi+rm3s4EKIy8cu0d5NcWszb4bUMVcXCQgl4Z2QPNINbZ3YfZHMZEMZ5CMkb5qOKZVuEROHhIq3It",
+ "aQZJBjnd9wf92X4m9vPYALjjjborNCQ2iim+6Q0l+6CRkaEFjqdiwiPBLyQ1R9CoAg2BuN4HRs4Ax44x",
+ "J0dH9+qhcK7oFvnxcNl2qyMj4m24FdrsuKMHBNlx9CkAD+ChHvrmqMDOSaN7dqf4b1BuglqOOH6SPaih",
+ "JTTjH7WAARuqCxAPzkuHvXc4cJRtDrKxA3xk6MgOGHRfU6lZykrUdb6H/Z2rft0Jom5GkoGmLIeMBB+s",
+ "GliG/YmNv+mOeTNVcJLtrQ9+z/gWWU7OFIo8beCvYI8692sb2BmYOu5Cl42Mau4nygkC6sPFjAgeNoEd",
+ "TXW+N4Ka3sCeXIMEoqplwbS2AdttVVeLMgkHiPo1RmZ0TjwbFOl3YIpX8QKHCpbX34r5zOoE4/BddhSD",
+ "FjqcLlAKkU+wkPWQEYVgUrwHKYXZdeZix330sKekFpCOaaMHt77+76kWmnEF5L9FRVLKUeWqNNQyjZAo",
+ "KKAAaWYwIlg9p4vsaDAEORRgNUn88vBhd+EPH7o9Z4qs4No/uDANu+h4+BDtOK+F0q3DdQf2UHPcziPX",
+ "Bzp8zMXntJAuTzkcWeBGnrKTrzuD114ic6aUcoRrln9rBtA5mbspaw9pZFpUBY47yZcTDB1bN+77BSuq",
+ "nOq78FrBluaJ2IKULIODnNxNzAT/Zkvzn+puB3S6JgqMFQVkjGrI96SUkIKNzjeimqrHPiE2bi/dUL5G",
+ "CV2Kau0Cx+w4yGErZW0hsuK9IaJSjN7xBK3KMY7rgoX9Aw0jvwA1OlTXJG01hmtaz+fe5Ey5Cv3ORUz0",
+ "Ua/UfDaoYhqkbhsV0yKn/cpkAvdtCVgBfpqJJ/ouEHVG2OjjK9wWQ71mc/8YG3kzdAzK/sRBKFvzcSia",
+ "zei3+f4OpAw7EJFQSlB4J4R2IWW/ilX4osxdGmqvNBR907nt+uvA8XszqKAJnjMOSSE47KOPqBmHH/Bj",
+ "9DjhvTTQGSWEob5dob8Ffwes9jxTqPG2+MXd7p7QrotIfSvkXfkg7YCT5ekJLr+D/m035U0dkzTPI748",
+ "996kywDUvH7fziShSomUoZB0nqm5PWjO/ecep7TR/7qOor2Ds9cdt+O0Cp8yolEW8pJQkuYMTbaCKy2r",
+ "VL/jFI1CwVIj0UZe+x02E77wTeJ2yYjZ0A31jlOMNKtNRdEIiRVE7CLfAnhroarWa1C6o1ysAN5x14px",
+ "UnGmca7CHJfEnpcSJIb8nNiWBd2TlaEJLcjvIAVZVrotbuNzKqVZnjsPmpmGiNU7TjXJgSpNfmD8cofD",
+ "eS+7P7Ic9LWQVzUW4rf7GjgoppJ4VNR39isGrLrlb1zwKj5/t5+tz8WM37y52qPNqHnS/X/u/9fp27Pk",
+ "bzT5/VHy/H8s3n949vHBw96PTz5+9dX/bf/09ONXD/7rP2M75WGPPfZxkJ+/dKro+UvUNxqnSw/2T2Zw",
+ "LxhPokQWhk90aIvcx4etjoAetK1RegPvuN5xQ0hbmrPM8JabkEP3humdRXs6OlTT2oiO9cmv9Ugp/hZc",
+ "hkSYTIc13liK6gcSxp/VoRfQvZTD87KquN1KL33bVyM+oEus5vXTSZtV5ZTgu7oN9dGI7s8nX3w5mzfv",
+ "4ervs/nMfX0foWSW7WKvHjPYxZQzd0DwYNxTpKR7BTrOPRD2aOyaDaYIhy3AaPVqw8pPzymUZss4h/Ox",
+ "+M7Is+Pn3AbJm/ODPsW9c1WI1aeHW0uADEq9iWVbaAlq2KrZTYBOnEcpxRb4nLATOOkaWTKjL7oouhzo",
+ "Cl/9o/YppmhD9TmwhOapIsB6uJBJlowY/aDI47j1x/nMXf7qztUhN3AMru6ctQPR/60FuffdN5dk4Rim",
+ "umcf4NqhgyeTEVXavQpqRQAZbmZzzFgh7x1/x1/CinFmvp++4xnVdLGkiqVqUSmQX9Oc8hRO1oKc+odG",
+ "L6mm73hP0hpMAxU88SJltcxZSq5ChaQhT5vaoz/Cu3dvab4W79697wVD9NUHN1WUv9gJEiMIi0onLjFB",
+ "IuGaypizSdUP03Fkm3lkbFYrZIvKWiR94gM3fpzn0bJU3Qeq/eWXZW6WH5Chcs8vzZYRpYX0sogRUCw0",
+ "uL8/CncxSHrt7SqVAkV+K2j5lnH9niTvqkePngJpvdj8zV35hib3JUy2rgw+oO0aVXDhVq2EnZY0Kek6",
+ "5tN69+6tBlri7qO8XKCNI88Jdmu9FPWR8DhUswCPj+ENsHAc/eoNF3dhe/kkVPEl4CfcQmxjxI3G037T",
+ "/Qrejt54uzrvT3u7VOlNYs52dFXKkLjfmTo3zdoIWT78QbE1aqsujc8SSLqB9MrlV4Gi1Pt5q7uPsHGC",
+ "pmcdTNnMO/blF+Z+QI/AEkhVZtSJ4pTvu4/wFWjt43jfwBXsL0WTOuKYV/ftR+Bq6KAipQbSpSHW8Ni6",
+ "Mbqb78K4ULEvS/+WGh/VebI4renC9xk+yFbkvYNDHCOK1iPlIURQGUGEJf4BFNxgoWa8W5F+bHlGy1ja",
+ "my+ShcfzfuKaNMqTi7gKV4NWd/u9AEzjJa4VWVIjtwuXgco+dA64WKXoGgYk5NApM/E5ccuRg4Mcuvei",
+ "N51YdS+03n0TBdk2Tsyao5QC5oshFVRmOnF2fibr93OeCUws6RC2zFFMqgMSLdOhsuUcs5nyhkCLEzBI",
+ "3ggcHow2RkLJZkOVT46FOcT8WZ4kA/yBD/fH0rWcByFiQaKwOhmL57ndc9rTLl3SFp+pxadnCVXLCalW",
+ "jISPUemx7RAcBaAMcljbhdvGnlCaJALNBhk4flqtcsaBJLFos8AMGlwzbg4w8vFDQqwFnkweIUbGAdjo",
+ "z8aByY8iPJt8fQyQ3CVBoH5s9IQHf0P8vZaNvzYijygNC2cDXq3UcwDqQhTr+6sTKIvDEMbnxLC5Lc0N",
+ "m3MaXzNIL2sIiq2dHCEuouLBkDg74gCxF8tRa7JX0U1WE8pMHui4QDcC8VLsEvtgMyrxLndLQ+/RkHR8",
+ "Pho7mDY/yz1FlmKHUTp4tdgQ6AOwDMPhwQg0/B1TSK/Yb+g2t8CMTTsuTcWoUCHJOHNeTS5D4sSUqQck",
+ "mCFyuR+kXLkRAB1jR5O/2Cm/B5XUtnjSv8ybW23epBLzr31ix3/oCEV3aQB/fStMnSTldVdiidop2sEm",
+ "7fwwgQgZI3rDJvpOmr4rSEEOqBQkLSEquYp5To1uA3jjXPhugfECs9BQvn8QRDBJWDOloTGi+ziJz2Ge",
+ "pJj8TojV8Op0KVdmfW+EqK8p60bEjq1lfvIVYAjwikmlE/RARJdgGn2rUKn+1jSNy0rtGCmbKpZlcd6A",
+ "017BPslYXsXp1c37/Usz7Y81S1TVEvkt4zZgZYmpjaORkyNT2+Da0QW/sgt+Re9svdNOg2lqJpaGXNpz",
+ "/Iuciw7nHWMHEQKMEUd/1wZROsIggxevfe4YyE2Bj/9kzPraO0yZH/tg1I5/dzt0R9mRomsJDAajq2Do",
+ "JjJiCdNBZuD+U9SBM0DLkmW7ji3UjjqoMdOjDB4+n1oHC7i7brADGAjsnrHXMBJUO3VeI+DbHM+tzDUn",
+ "kzBz2U5wFzKEcCqmfIWCPqLq13KHcHUJNP8e9r+Ytric2cf57Ham0xiu3YgHcP263t4ontE1b01pLU/I",
+ "kSinZSnFluaJMzAPkaYUW0ea2Nzboz8xq4ubMS+/OXv12oH/cT5Lc6AyqUWFwVVhu/JfZlU2S9/AAfEZ",
+ "0I3O52V2K0oGm1+nFguN0tcbcKmkA2m0l/OycTgER9EZqVfxCKGDJmfnG7FLHPGRQFm7SBrznfWQtL0i",
+ "dEtZ7u1mHtqBaB5c3LTEqVGuEA5wa+9K4CRL7pTd9E53/HQ01HWAJ4VzjSS7Lmw+d0UE77rQMeZ5Xzqv",
+ "e0ExY6W1ivSZE68KtCQkKmdp3MbKl8oQB7e+M9OYYOMBYdSMWLEBVyyvWDCWaTYlJ00HyGCOKDJVNC1O",
+ "g7ulcLV6Ks7+UQFhGXBtPkk8lZ2DiulNnLW9f50a2aE/lxvYWuib4W8jY4TZWrs3HgIxLmCEnroeuC9r",
+ "ldkvtLZImR8Cl8QRDv9wxt6VOOKsd/ThqNkGL27aHrewtE6f/xnCsDnWD9f18cqrSxs7MEe0Tg9TyUqK",
+ "3yGu56F6HHlo5PPTMoxy+R3Chw5hdYoWi6mtO025oWb2we0ekm5CK1Q7SGGA6nHnA7ccJsr0FmrK7Vbb",
+ "shmtWLc4wYRRpQs7fkMwDuZeJG5Or5c0lkXUCBkGprPGAdyypWtBfGePe1W/trCzk8CXXLdl9hF5CbJ5",
+ "A9hPSHNDgcFOO1lUaCQDpNpQJphb/1+uRGSYil9TbquvmH72KLneCqzxy/S6FhJTQKi42T+DlBU0j0sO",
+ "Wdo38WZszWxhkUpBULnCDWSLNlkqctU/6jdEDjXnK/JoHpTPcbuRsS1TbJkDtnhsWyypQk5eG6LqLmZ5",
+ "wPVGYfMnE5pvKp5JyPRGWcQqQWqhDtWb2nm1BH0NwMkjbPf4ObmPbjvFtvDAYNHdz7PTx8/R6Gr/eBS7",
+ "AFxhmDFukiE7+atjJ3E6Rr+lHcMwbjfqSfS1vK0MN8y4Rk6T7TrlLGFLx+sOn6WCcrqGeKRIcQAm2xd3",
+ "Ew1pHbzwzJY1UlqKPWE6Pj9oavjTQPS5YX8WDJKKomC6cM4dJQpDT01ZCjupH87WSHIZhT1c/iP6SEvv",
+ "IuookZ/WaGrvt9iq0ZP9Iy2gjdY5oTbvR86a6AWf55yc+7RCmGK5zqxscWPmMktHMQeDGVaklIxrVCwq",
+ "vUr+TNINlTQ17O9kCNxk+eWzSFrpdnpTfhzgnxzvEhTIbRz1coDsvQzh+pL7XPCkMBwle9C89ghO5aAz",
+ "N+62G/Idjg89VSgzoySD5Fa1yI0GnPpWhMdHBrwlKdbrOYoej17ZJ6fMSsbJg1Zmh35+88pJGYWQsVyB",
+ "zXF3EocELRlsMXYvvklmzFvuhcwn7cJtoP+8ngcvcgZimT/LMUXgaxHRTn2q89qS7mLVI9aBoWNqPhgy",
+ "WLqh5qSdVvrT89G7iYKKe7q8Ybvv2DJfPB7wjy4iPjO54AY2vny7kgFCCdLqR0kmq78HPnZKvha7qYTT",
+ "OYWeeP4JUBRFScXy7Jfm5WenaoGkPN1EfWZL0/HXpr5avTh7B0bT/m0o55BHh7Py5q9eLo1Izn8XU+cp",
+ "GJ/YtltIwS63s7gG8DaYHig/oUEv07mZIMRq+1FdHbSdr0VGcJ4mx1xzXPsFOII06f+oQOnYAyX8YAPH",
+ "0DZq2IHN0k2AZ6iRnpDvbAnlDZBWAiHUBH2miPar6arMBc3mmMHi8puzV8TOavvYKkE2S/gaFaH2Kjo2",
+ "sSB95rQQZF/wJ/48Yvo44/HaZtVKJ3VS79gDVNOiSTvOOn4CVJFC7JyQl0ExVPtW1Qxh6GHFZGG0uno0",
+ "Kx8hTZj/aE3TDap9LdY6TPLT09t7qlRBScm6NFSdUxLPnYHbZbi3Ce7nRBjd/JopWzkXttB+81o/AHdm",
+ "B/8Gtr08WXFuKeXkiFuuziB5LNo9cPaK9K6EKGQdxB8p9NvqEMdm+7/AXtEUV93SAb1akvYFZV3yx1dE",
+ "TykXnKWYYCp2RbsSu1P8bBNycXUNuf6IuxMaOVzRggV1KJ7D4mAJA88IHeL6hv7gq9lUSx32T421XDdU",
+ "kzVo5TgbZHNfd8PZGhlX4HKEYkHmgE8K2fJdIoeMusOT2m1yJBnh05sB5fFb8+1HZ1rAmPQrxlGJcGhz",
+ "gp+1BmIFUG00D6bJWoBy62m/P1ZvTZ8TfIqbwe79ia8YimNY159ZtvVz94c6815v52U2bV+Yti5BUv1z",
+ "K8rZTnpWlm7S4aosUXlA7/gggiPey8S7jwLk1uOHo42Q22i4Ct6nhtBgi85uKPEe7hFGXaGkU/3KCK2W",
+ "orAFsWFi0SwJjEfAeMU4NPVsIxdEGr0ScGPwvA70U6mk2oqAk3jaJdAcPdwxhqa0c2/cdqhueiiDElyj",
+ "n2N4G5viKgOMo27QCG6U7+syuoa6A2HiBdbvdojsl0pBqcoJURm+WugUT4kxDsO4fXmm9gXQPwZ9mch2",
+ "15Lak3PMTTT0EHVZZWvQCc2yWMrWr/Erwa8kq1BygB2kVZ3asyxJinlX2olo+tTmJkoFV1UxMpdvcMvp",
+ "gmpEEWoIKyL5HcaHLss9/hvLazm8My7Q4+hQQx/VkR2XfakfOhmTeg1NJ4qtk+mYwDvl9uhopr4ZoTf9",
+ "75TSc7FuA/KJ00+Mcblwj2L87RtzcYTZGXrJWu3VUidPwMA+4WtIotpYP/ttcyW8ynrZW9GhVNeoGzdA",
+ "DFebm+PlNxDeGyTdoPZ+tR7KoSDfdDAmnWr3Ok5TMsqCBl8c2Qgh+7YIoYhbZ4eigmxQkPnc6z1NMuzJ",
+ "2Tqe+DBAqA836wP0vY9lJSVlzv3eMIs+Zl3Ue/8dwpR42GaDu4twseSDFrvvt0Nx3z4ZG37vVqO6Avdk",
+ "vpSwZaLyjm0f+eRVQvtrq7ZTHXkfXX/f8IpTfV5z6KDx9tJVBbDLdDr597/YODkCXMv9P4Ept7fpvTpX",
+ "fWnXmqeaJqROKD0pwXTrVpySqDCWE8/Jhq1KWwfqhPXI6uUUcaBf92s+O8+OujBjeRVndpTYsYtX8RpO",
+ "O9WkmsIjVgrFmrzusfJeE0MML7FCV5A2qz+Wj+/ZQqoxmX8TtyABjkmiZSYLCob+O/3UgDpdR2K6rFNj",
+ "qab6GfwP3PG912DBi0ab/fxkemKlszo6Dfk0ZkNeA3c1O9vvPCZHm69WkGq2PfD67q8b4MHLrrm3y9ja",
+ "28FjPFZHL2PyluOtjg1AY4/jRuEJkijeGpyhtzdXsL+nSIsaounY5/6qvUneDsQAcofEkIhQsegPa0h2",
+ "DnmmaspALPhoK9sdmgxog5WcgrekN5zLk6S5OJr3pSNTxkvJTJrLdD3q1TUG4g490OtXohjWP15i4Q9V",
+ "V1n0eT9CLZ2c97MjXru8IfhWsvad+AwioPxv/mG0nSVnVxDWmkJP1TWVmW8RNb14q04ych/1XtX5Kgpd",
+ "oFf1zKyJje2/o4rk28II6DQXRoxIhsLI2+GodSzHPWWDbmz6dwy0NXCtQLqafCj/5kJBooWPpR2DYwwV",
+ "NrLoRkhQgzkuLXCDmWfeNKl1MNcvxUwz1AUUhQskEgpqoJNBApzhOceQ/cJ+9w+HfK7Xgxamml4PFx3w",
+ "UdFM9ZAYUv2KuNvy8IOkmxibGOe27rOKZcPhINvekFKKrErtBR0ejNogNznX1Agridpp0v4qOzpC8Krz",
+ "CvYLqwT5ag1+B0OgreRkQQ+yKHQ2+U7NbyoG9/pOwPuclqv5rBQiTwacHef9FD5dir9i6RVkxNwUPnpw",
+ "oPINuY829tqbfb3Z+5Q1ZQkcsgcnhJxxG6/tHdvtHNKdyfk9PTb/DmfNKptVyxnVTt7xeOAr5ruSt+Rm",
+ "fphxHqbAsLpbTmUHOZAgZjeQPkjS60gdqJOpWnnf1dytzdMQlYUiJpM0ZWcOxMnUITJN5Y8mTKYvHeS5",
+ "uE6QipI6/1dM5zDt2kzSZzxtuhlsLyGIt6HKXaB7sqEZSYWUkIY94k8cLFCFkJDkAsNvYp7BlTbyUIFx",
+ "zZzkYk1EadRcm0bP+1CiZWmCuewzW9szsY6agUQGoNyzWjeNbdyfZ6R6zfGVcS43EXsLItpj+ejyN45Q",
+ "jq5aEYA5gUAP25rOYtV92uvq1ocaqtamRcHSOLr/taJMBmNDDtQuiqyvJkdXWsm/ChzAVdRlO+4htXXo",
+ "llP9pHXO5InHIgBg2HPagmGS//RYMFZY1zGhESSf11LrvFV2l3XOvs9nZ2k8pVZr3QAxY1cS3Cs1W4Cu",
+ "UzmnpHrjbzHTvK9bGj0FFD4hs+U/qLKWEG+RcdXvuuKBKJMcttByKLunc1WaglJsC2HlPNuZZAAl2ie7",
+ "UnPMUxpyuY4o5daeBL62KdiNylYWsXanyAHBKSrm7Xhij4maepQMRFuWVbSFP3WLWmRDZcgibNjDOpFT",
+ "HM0k4osbYxEHYxuQ5qPnksdDG8KXm7VRBGfLauOpJcLmZKuSXvNhJSJid6r97bdfB8HBiOq8pB688mW9",
+ "KzdVIAcpY4wwevUDozKHAl//NUx64sUt1zciY1lTF1ORAZhqzjNG70ETHRY0K+ieZGy1AmmN+UpTnlGZ",
+ "hc0ZJylITZnRbPbq5mKtgVZWMD8o2RruioN6BhOTcdEuZQHJ905luIXUiZ6biMRpr1othkok9nYl/pyA",
+ "7ox0jXFVA0TgHkKjbG0PmOAoIJGCXsGR8yj2O4xPg+lJnO1PC5x1yhQxX+sNc6tNYt39MITI7RYUQxz3",
+ "DIWpF5s3XdJGs6Al2V+QXRr/obk4p5Vl9B0OgBc6DIPCjN5248D5zI+jfqiREizl/RAltJZ/yAfpFthI",
+ "GsEWOUagNdhEuDagvr0vgYNZvaj9tkM1RLvuXcyzKLgt8tdzC1veZKv2BYRjzoLc0vzTu3YxAecZ4gOy",
+ "N8PG4NA3GCLZolLd7GXCKzpp7sAPeHdT89foiv4rmD2KaqVuKCfC1GK9D+bBm4Xm1nCx8iW8tsDJNY5p",
+ "49gef0mW7uV2KSFlqisaXfvqGrUrDItNudcgO33A93Zonb8IfQsyXnlNg/zYZOpHHX/NGwibI/qZmcrA",
+ "yY1SeYz6emQRwV+MR4Up1A5cF1etADdb+aTzckNIuONAtyBk/chAt35yuKnLs8Fc5tKpFPTXOfm2buE2",
+ "clE3a5sapdlH7lg69ynBlfEqDaY7RndahGCJE4Kgkt8e/0YkrLCGoSAPH+IEDx/OXdPfnrQ/m+P88GFU",
+ "OvtkcZ0WR24MN2+MYn4ZeulnX7MNPCrt7EfF8uwQYbSeCDdVQPER7K8uEcFnqUP6q4016R9VVwvuFgFy",
+ "FjGRtbYmD6YKHv9OePfrukVe+aIfJ60k03vMj+jtB+zXaATqd3U0k4uGq/VDd/dpcQV1hs0m9qlS/nb9",
+ "TtAc7yOrtnJzC4n8hHyzo0WZgzsoX91b/gme/vlZ9ujp4z8t//zoi0cpPPvi+aNH9Pkz+vj508fw5M9f",
+ "PHsEj1dfPl8+yZ48e7J89uTZl188T58+e7x89uXzP90zfMiAbAGd+Ww8s/+NxXqTs9fnyaUBtsEJLdn3",
+ "sLd1AQ0Z+4qDNMWTCAVl+ezU//Q//Qk7SUXRDO9/nblkH7ON1qU6XSyur69Pwi6LNQY7JFpU6Wbh5+mV",
+ "JDx7fV57iawVCHfUvpP11j1PCmf47c03F5fk7PX5SVCv/nT26OTRyWMsb14CpyWbnc6e4k94eja47wtH",
+ "bLPTDx/ns8UGaI6xgeaPArRkqf8kgWZ79391TddrkCeuDKP5aftk4cWKxQcX9PFx7NsirGiy+NCKjckO",
+ "9MSKB4sPPpHfeOtWpjwXExR0mAjFWLPFEvODTG0KKmg8vBRUNtTiA4rLg78vXEKD+EdUW+x5WPgAsnjL",
+ "FpY+6J2BtdMjpTrdVOXiA/4H6TMAyz4fWugdX6DtY/GhtRr3ubea9u9N97DFthAZeIDFamUTk459Xnyw",
+ "/wYTwa4EyYzghyF77lcbWr2wxfD7P++5e9WYQywg7meuwCqmPp3BnqdNgH99ZM8z3/hiz1MvofpnMngQ",
+ "nzx6ZKd/hv+5m8Kn7Qc7kfKnFzW8mFsOI6YQhsefDoZzjhGlhn8Ry58/zmdffEosnBudndOcYEs7/dNP",
+ "uAkgtywFcglFKSSVLN+Tn3mdhCBIbhijwCsurrmH3FzuVVFQuUehuRBbUHUB8oY4iQQjprha5FIUAQ3j",
+ "7ULXCsMhsKzEbG6fZ71HwUjHZARvr+nP5G1VzeDtU/HdwTNx02LeI/Fwk+A8EMBqh59S+7iuLdx5Q2Sn",
+ "uhfboNm/GcG/GcEdMgJdST54RIP7C4O6obQRbCSl6QbG+EH/tgwu+FkpYsFRFyPMwqVOGeIVF21eEVQu",
+ "OX07LWWZczBY23EGirls7qg3GKG4EetlzZH8mUf3VLDXY/loP77/p7jfX1Duz3Nrx21cIZU5A1lTAeX9",
+ "bDb/5gL/33ABm5aL2n2dEw15rsKzrwWefetscW91uHWCTeQD3cq4sZ8XH9qVmVpKgtpUOhPXQV80mVt/",
+ "T193qGuVtv5eXFOmk5WQ7p0OZs7ud9ZA84VLytP5tXkH3/uCj/uDH8N4k+ivi7owQfRjVx2NfXXq2EAj",
+ "7/72nxvTVGjqQQ5ZG3nevjf8CdPeOubZWC5OFwuMfd8IpRezj/MPHatG+PF9TRI+V+GslGyLqQ/ef/x/",
+ "AQAA//+92+QcXcsAAA==",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/experimental/routes.go b/daemon/algod/api/server/v2/generated/experimental/routes.go
index 3e8edb58b..e253331bf 100644
--- a/daemon/algod/api/server/v2/generated/experimental/routes.go
+++ b/daemon/algod/api/server/v2/generated/experimental/routes.go
@@ -75,166 +75,175 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9+3PcNtLgv4Ka76vy44aS/Ih3rarUd4qdZHVxHJelZO9by5dgyJ4ZrEiAAUDNTHz6",
- "36/QAEiQBDnUI/Zu1f1kawg0Go1Go9EvfJqloigFB67V7PjTrKSSFqBB4l80TUXFdcIy81cGKpWs1Ezw",
- "2bH/RpSWjK9m8xkzv5ZUr2fzGacFNG1M//lMwu8Vk5DNjrWsYD5T6RoKagDrXWla15C2yUokDsSJBXH6",
- "enY98oFmmQSl+lj+xPMdYTzNqwyIlpQrmppPimyYXhO9Zoq4zoRxIjgQsSR63WpMlgzyTB34Sf5egdwF",
- "s3SDD0/pukExkSKHPp6vRLFgHDxWUCNVLwjRgmSwxEZrqokZweDqG2pBFFCZrslSyD2oWiRCfIFXxez4",
- "w0wBz0DiaqXArvC/SwnwBySayhXo2cd5bHJLDTLRrIhM7dRRX4Kqcq0ItsU5rtgVcGJ6HZAfK6XJAgjl",
- "5P13r8izZ89emokUVGvIHJMNzqoZPZyT7T47nmVUg//c5zWar4SkPEvq9u+/e4Xjn7kJTm1FlYL4Zjkx",
- "X8jp66EJ+I4RFmJcwwrXocX9pkdkUzQ/L2ApJExcE9v4XhclHP+LrkpKdbouBeM6si4EvxL7OSrDgu5j",
- "MqxGoNW+NJSSBuiHo+Tlx09P5k+Orv/jw0nyD/fnV8+uJ07/VQ13DwWiDdNKSuDpLllJoLhb1pT36fHe",
- "8YNaiyrPyJpe4eLTAkW960tMXys6r2heGT5hqRQn+UooQh0bZbCkVa6JH5hUPDdiykBz3E6YIqUUVyyD",
- "bG6k72bN0jVJqbIgsB3ZsDw3PFgpyIZ4LT67kc10HZLE4HUreuCE/nWJ0cxrDyVgi9IgSXOhINFiz/Hk",
- "TxzKMxIeKM1ZpW52WJHzNRAc3Hywhy3SjhuezvMd0biuGaGKUOKPpjlhS7ITFdng4uTsEvu72RiqFcQQ",
- "DRendY6azTtEvh4xIsRbCJED5Ug8v+/6JONLtqokKLJZg167M0+CKgVXQMTin5Bqs+z/6+ynt0RI8iMo",
- "RVfwjqaXBHgqMsgOyOmScKED1nC8hDQ0PYfm4fCKHfL/VMLwRKFWJU0v4yd6zgoWmdWPdMuKqiC8KhYg",
- "zZL6I0QLIkFXkg8hZCHuYcWCbvuDnsuKp7j+zbAtXc5wG1NlTndIsIJuvz6aO3QUoXlOSuAZ4yuit3xQ",
- "jzNj70cvkaLi2QQ1R5s1DQ5WVULKlgwyUkMZwcQNsw8fxm+GT6N8Beh4IIPo1KPsQYfDNsIzZnebL6Sk",
- "KwhY5oD87IQbftXiEnjN6GSxw0+lhCsmKlV3GsARhx7XwLnQkJQSlizCY2eOHEbA2DZOAhdOB0oF15Rx",
- "yIxwRqSFBiusBnEKBhy/7/RP8QVV8OL50BnffJ24+kvRXfXRFZ+02tgosVsycnSar27DxjWrVv8J98Nw",
- "bMVWif25t5BsdW5OmyXL8ST6p1k/T4ZKoRBoEcKfTYqtONWVhOML/tj8RRJypinPqMzML4X96ccq1+yM",
- "rcxPuf3pjVix9IytBohZ4xq9cGG3wv5j4MXFsd5G7xVvhLisynBCaeviutiR09dDi2xh3pQxT+rbbnjx",
- "ON/6y8hNe+htvZADSA7SrqSm4SXsJBhsabrEf7ZL5Ce6lH+Yf8oyN711uYyR1vCxO5LRfODMCidlmbOU",
- "GiK+d5/NVyMEwF4kaNPiEA/U408BiqUUJUjNLFBalkkuUponSlONkP5TwnJ2PPuPw8b+cmi7q8Ng8Dem",
- "1xl2MiqrVYMSWpY3gPHOqD5qRFgYAY2fUExYsYdKE+N2EQ0rMSOCc7iiXB80V5aWPKg38Ac3UkNvq+1Y",
- "eneuYIMEJ7bhApTVgG3DB4oEpCdIVoJkRYV0lYtF/cPDk7JsKIjfT8rS0gO1R2ComMGWKa0e4fRps5PC",
- "cU5fH5DvQ9ioigue78zhYFUNczYs3anlTrHatuTm0EB8oAgup5AHZmk8GYyafx8ch9eKtciN1rOXV0zj",
- "v7m2IZuZ3yd1/vdgsZC2w8yFFy1HOXvHwV+Cy83DDuf0GceZew7ISbfv7djGQIkzzK14ZXQ9LdwROtYk",
- "3EhaWgTdF3uWMo6XNNvI4npHaTpR0EVxDvZwwGuI1a332t79EMUEWaGDwze5SC//RtX6Hvb8wsPqbz8c",
- "hqyBZiDJmqr1wSymZYTbq4E2ZYuZhnjBJ4tgqIN6ivc1vT1Ty6imwdQcvnG1xJIe+6HQAxm5u/yE/6E5",
- "MZ/N3jai34I9IOcowJTdzs7JkJnbvr0g2JFMA7RCCFLYCz4xt+4bYfmqGTy+TpPW6FtrU3Ar5CaBKyS2",
- "974NvhHbGA7fiG1vC4gtqPvgDwMH1UgNhZqA32uHmcD1d+SjUtJdn8gIewqRzQSN6qpwN/DwxDejNMbZ",
- "k4WQt5M+HbHCSWNyJtRADYTvvEMkbFqViWPFiNnKNugAarx840KjCz5GsRYVzjT9E6igDNT7oEIb0H1T",
- "QRQly+EeWH8dFfoLquDZU3L2t5Ovnjz99elXLwxLllKsJC3IYqdBkYfubkaU3uXwqD8zvB1VuY5Df/Hc",
- "GyrbcGNwlKhkCgUt+6CsAdSqQLYZMe36VGuTGWddIzhlc56DkeSW7MTa9g1qr5kyGlaxuJfFGCJY1oyS",
- "EYdJBnuZ6abTa4bZhVOUO1ndx1UWpBQyYl/DLaZFKvLkCqRiIuJNeedaENfCq7dl93eLLdlQRczYaPqt",
- "OCoUEc7SWz5d7lvQ51ve0GZU8tv5Rmbnxp2yLm3ie0uiIiXIRG85yWBRrVo3oaUUBaEkw454Rn8P+mzH",
- "U7Sq3QeTDl/TCsbRxK92PA3ubGahcshWrUW4+92sSxVvn7NDPVARdAw53uBnvNa/hlzTe9dfugPEcH/l",
- "F9IiSzLTEG/Bb9hqrQMF850UYnn/OMZGiSGKH6x6nps+fSX9rcjATLZS93AYN8AaXjdrGnI4XYhKE0q4",
- "yAAtKpWKH9MDnnt0GaKnU4cnv15bjXsBhpFSWpnZViVBP15PcjQdE5pa7k2QNGrAi1G7n2wrO5z1CucS",
- "aGZu9cCJWDhXgXNi4CQpOiG1P+ickhDZSy28SilSUAqyxJko9qLm21khokfohIgjwvUoRAmypPLOyF5e",
- "7cXzEnYJuswVefjDL+rRF8BXC03zPYTFNjHy1hc+5w/qYz1t+DGG6w4esh2VQLzMNbdLIyBy0DBEwhvR",
- "ZHD9uhj1VvHuZLkCiZ6ZP5Xj/SB3Y6Aa1T+Z3++KbVUOBIK5i845K9BuxykXClLBMxUFllOlk31i2TRq",
- "3cbMDAJJGJPECHhAKXlDlbbeRMYzNILY4wTHsQqKGWIY4UGF1ED+xeuifdipOQe5qlStmKqqLIXUkMXm",
- "wGE7MtZb2NZjiWUAu9Z+tSCVgn2Qh6gUwHfEsjOxBKK6Nro7d3t/cmiaNuf8LkrKFhINIcYQOfOtAuqG",
- "wTADiDDVENoyDlMdzqkjcOYzpUVZGmmhk4rX/YbIdGZbn+ifm7Z95qK6ObczAQpjcFx7h/nGUtaGQa2p",
- "uUIjZFLQS6N74IXYuj37OJvNmCjGU0jGON9syzPTKtwCezdpVa4kzSDJIKe7PtCf7WdiP48BwBVvLj5C",
- "Q2LjWeKL3nCyDx8YAS0QnoopjwS/kNRsQXPzaBjE9d4DOQOEHRNOjo8e1KBwrOgSeXg4bbvUEYh4Gl4J",
- "bVbcsgNi7AT6FHwHyFBDvj0lsHPSXMu6Q/w3KDdArUbcfJAdqKEpNPBvNIEBY5qLFA62S0e6dwRwVGoO",
- "SrE9YmRoxw5Y9t5RqVnKSrzq/AC7e7/5dQeI+ptIBpqyHDISfLC3wDLsT2wgRhfm7W6Ck4wwffR7VpjI",
- "dHKmUONpI38JO7xyv7MRfudBXOA9XGUjUM3xRDlBRH3ckNHAwyawpanOd0ZP02vYkQ1IIKpaFExrG7nb",
- "vulqUSYhgKiBe2RE582x0XF+Baa4l84QVDC9/lLMZ/ZKMI7feede0CKHuwqUQuQTjEc9YkQxmOT4J6Uw",
- "q85cELEPI/Wc1ELSCW105dWn/wPVIjPOgPy3qEhKOd64Kg21SiMk6gmoP5oRjAZWj+lc/A2FIIcC7EUS",
- "vzx+3J3448duzZkiS9j4yHvTsEuOx4/RjPNOKN3aXPdgKjTb7TRyfKDlH889F7zQkSn7XcwO8pSVfNcB",
- "XrsLzJ5SyjGumf6dBUBnZ26nzD3kkWnudYQ7yagfgI7NG9f9jBVVTvV9uC9G9dH6PsGKAjJGNeQ7UkpI",
- "wUZXGwVLWVwMasTGXaVryleoV0tRrVzgj4WDgrFS1oIhK94DEVU+9JYnKymqMiYoXbCnD7A3agdQc/MJ",
- "CImdrZ6/ofV4LqdiygnmCR6szvcG5pBXYT4bvBgaol41F0NLnHaWQJwKmPaQqCpNAaIhwLErVz3VTjZk",
- "k9/iABq1oZI2BorQVFc0D7mOnC4J5bt2miRluTJSkCmC7UznJq52bufmc1iWNLe+2UhSRbhTWhpfsPIN",
- "SbukmOh3QCYx2lCfM0IGNNvLsPGfY8NvQMew7A8cBF01H4firsz9O9/dgxpkAREJpQSFh1Zot1L2q1iG",
- "uU/uVFM7paHom/Zt118HBM37wQuk4DnjkBSCwy6a7ss4/Igfo4IDD86BzqjCDPXt3kpa+HfQao8zhRvv",
- "Sl9c7UAWvasDDu9h8btwO16dMOsLrZaQl4SSNGdo0xRcaVml+oJTtJoEmy0SmOHvh8N2tFe+SdxwF7Gr",
- "OVAXnGJQTm1LiTqTlxAxHHwH4M1pqlqtQHXkJ1kCXHDXinFScaZxrMKsV2IXrASJ0REHtmVBd0YEotnv",
- "D5CCLCrdlsmYeaK0EZfWxWSGIWJ5wakmOZg79Y+Mn28RnHfRep7hoDdCXtZUiB8hK+CgmEriASTf268Y",
- "2+emv3ZxfpgpbD9bp4SB36Sn7NCo0mS//p+H/3X84ST5B03+OEpe/o/Dj5+eXz963Pvx6fXXX//f9k/P",
- "rr9+9F//GVspj3ssL8JhfvraXdZOX6NG3nglerh/Not0wXgSZbLQ997hLfIQcwAdAz1q22v0Gi643nLD",
- "SFc0Z5lRuW7DDl0R19uLdnd0uKa1EB37jJ/rDfXcO0gZEhEyHdF462O8H3MVz0BCN5lLKsL9sqy4XUqv",
- "6NoAex/7IpbzOsvMFqA4JpiCtKY+cMv9+fSrF7N5kzpUf5/NZ+7rxwgns2wb1Q5hG7u+uA2CG+OBIiXd",
- "KRhQQBH3aJiPjTYIwRZg7r1qzcrPLymUZou4hPNhy84MsuWn3MYTm/2DTreds+WL5efHW0ujh5d6HUtM",
- "b2kK2KpZTYBOIEQpxRXwOWEHcNA1Q2TmauYCjnKgS0yQxouemJKGUe8Dy2ieKwKqhxOZdNeP8Q8qt05a",
- "X89n7vBX966PO8AxvLpj1h42/7cW5MH3356TQycw1QObq2hBB9llkVurS6BohcgYaWbLcdhkzQt+wV/D",
- "knFmvh9f8IxqerigiqXqsFIgv6E55SkcrAQ59jkZr6mmF7ynaQ1WzAmyYUhZLXKWkstQI27Y01ZB6EO4",
- "uPhA85W4uPjYixbo669uqKh8sQMkG6bXotKJy+FOJGyojHljVJ3Di5BtkYaxUefEwbai2OWIO/hxmUfL",
- "UnVz+frTL8vcTD9gQ+Uy1cySEaWF9LqIUVAsNri+b4U7GCTdeBNGpUCR3wpafmBcfyTJRXV09AxIK7nt",
- "N3fkG57clTDZkDGYa9i1X+DE7b0GtlrSpKSrmNfn4uKDBlri6qO+XOAlO88Jdmsl1fmgYQTVTMDTY3gB",
- "LB43ThDCyZ3ZXr5eT3wK+AmXENsYdaNxRd92vYI0u1svVydVr7dKlV4nZm9HZ6UMi/uVqct4rIyS5eMD",
- "FFthDKareLIAkq4hvXSlKKAo9W7e6u5DUJyi6UUHU7ZIiU2SwTR5tJkvgFRlRp0q3rUgLXZEgdY+CPQ9",
- "XMLuXDRZ9jdJUG7ny6qhjYqcGmiXhlnDbetgdBffxTmhiassfdop5h95tjiu+cL3Gd7IVuW9h00cY4pW",
- "PucQIaiMEMIy/wAJbjFRA+9OrB+bnrllLOzJFylY4mU/cU2ay5MLSQpngwZu+70ArHgkNoosqNHbhSvW",
- "Y3NCAylWKbqCAQ05dFtMzLxsuToQyL5zL3rSiWX3QOudN1GUbePEzDnKKWC+GFbBy0wnEM2PZD1jzgmA",
- "NfgcwRY5qkl1xJ4VOlS23Ee2qNgQanEGBskbhcOj0aZIqNmsqfJ1hLDckt/Lk3SAPzHHeayyRWjQD2oq",
- "1fZ1L3O7+7R3u3T1LXxRC1/JIrxaTqhKYTR8DNuOLYfgqABlkMPKTtw29ozS5Fs3C2Tw+Gm5zBkHksTC",
- "sahSImW2EFRzzLgxwOjHjwmxJmAyGUKMjQO00eOLgMlbEe5NvroJktzli1MPG33Fwd8QT22xAcpG5RGl",
- "EeFswIGUeglAXQxffX51IkkRDGF8ToyYu6K5EXPuxtcA6RVYQLW1U07BxRw8GlJnRyzw9mC50ZzsUXSb",
- "2YQ6k0c6rtCNYLwQ28TmtkU13sV2Yfg9GrONmXaxjWlLWTxQZCG2GMeCR4uNEd6DyzAeHo3ghr9lCvkV",
- "+w2d5haZsWHHtakYFypkGWfOq9llSJ2YMvSABjPELg+D6hS3QqBj7GhKvbrL795Lals96R/mzak2b6ou",
- "+XSY2PYf2kLRVRqgX98KU9eTeNfVWKJ2inY4RruURqBCxpjeiIm+k6bvClKQA14KkpYSlVzGXHfmbgN4",
- "4pz5boHxAgt2UL57FMT4SFgxpaExovuQhC9hnqRYJ0yI5fDsdCmXZn7vhaiPKVuIBju2pvnZZ4Axsksm",
- "lU7QAxGdgmn0ncJL9XemaVxXakcR2aqaLIvLBhz2EnZJxvIqzq9u3B9em2Hf1iJRVQuUt4zb2JAFVoGN",
- "xhaODG3DT0cn/MZO+A29t/lO2w2mqRlYGnZpj/Fvsi86kndMHEQYMMYc/VUbJOmIgAxSQvvSMdCb7ObE",
- "lNCDMetrbzNlHvbesBGfmDp0RllI0bkEBoPRWTB0Exm1hOmgiGo/V3NgD9CyZNm2Ywu1UAdvzPRGBg9f",
- "eqpDBVxdB2wPBQK7ZyxdRIJqVxlrFHxbDrdV5ONgEmXO27XAQoEQDsWUL+beJ1SdTraPVudA8x9g94tp",
- "i9OZXc9ndzOdxmjtIO6h9bt6eaN0Rte8NaW1PCE3JDktSymuaJ44A/MQa0px5VgTm3t79GcWdXEz5vm3",
- "J2/eOfSv57M0ByqTWlUYnBW2K/9tZmULmg1sEF8s2tz5vM5uVclg8esqTKFRerMGV3U30EZ75QEbh0Ow",
- "FZ2RehmPENprcna+ETvFER8JlLWLpDHfWQ9J2ytCryjLvd3MYzsQzYOTm1ZjMioVQgB39q4ETrLkXsVN",
- "b3fHd0fDXXtkUjjWSF3gwpa+VkTwrgsdw4t3pfO6FxSL+1mrSF848apAS0KicpbGbax8oQxzcOs7M40J",
- "Nh5QRg3Eig24YnnFAlimmZpw0e0gGYwRJaYvFDlEu4Vwz5pUnP1eAWEZcG0+SdyVnY2K1RSdtb1/nBrd",
- "oT+WA2wt9A34u+gYYWHL7omHSIwrGKGnrofu6/rK7CdaW6Qw3LpxSdzA4R+O2DsSR5z1jj8cN9vgxXXb",
- "4xa+QtKXf4YxbDnq/U+g+Murq7A5MEb0SROmkqUUf0D8nofX40gqji/lyTDK5Q/gE2LOG+tO8zJLM/rg",
- "cg9pN6EVqh2kMMD1uPKBWw5rCnoLNeV2qe0LA61YtzjDhFGlhxZ+wzAO514kbk43CxoruGiUDIPTSeMA",
- "btnStSC+s6e9qhMb7Ogk8CXXbZnNsi5BNlly/Yott1QY7LCTVYVGM0CuDXWCufX/5UpEwFR8Q7l9qML0",
- "s1vJ9VZgjV+m10ZIrJGg4mb/DFJW0DyuOWRp38SbsRWzbzBUCoIi/w6Qfd/GcpF7KKFO13GkOV2So3nw",
- "0ohbjYxdMcUWOWCLJ7bFgiqU5LUhqu5ipgdcrxU2fzqh+brimYRMr5UlrBKkVurwelM7rxagNwCcHGG7",
- "Jy/JQ3TbKXYFjwwV3fk8O37yEo2u9o+j2AHg3tAYkyYZipO/O3ES52P0W1oYRnA7qAfRdHL7iNaw4BrZ",
- "TbbrlL2ELZ2s27+XCsrpCuKRIsUenGxfXE00pHXowjP7AozSUuwI0/HxQVMjnwaiz434s2iQVBQF04Vz",
- "7ihRGH5qKvjbQT04+5yMK77q8fIf0UdaehdR5xL5eY2m9nyLzRo92W9pAW2yzgm1hTFy1kQv+JLQ5NTX",
- "3cFqtHURWksbM5aZOqo5GMywJKVkXOPFotLL5K8kXVNJUyP+DobQTRYvnkcq8LYrQfKbIf7Z6S5BgbyK",
- "k14OsL3XIVxf8pALnhRGomSPmmyPYFcOOnPjbrsh3+E46KlKmYGSDLJb1WI3GkjqOzEeHwF4R1as53Mj",
- "frzxzD47Z1Yyzh60Miv08/s3TssohIwV02u2u9M4JGjJ4Apj9+KLZGDecS1kPmkV7oL9l/U8eJUzUMv8",
- "Xo5dBL4RkduprwpdW9JdrHrEOjC0Tc0HwwYLB2pO2hV4P7/Tzxuf+84n88Xjin90kf3CS4pE9jMYWMSg",
- "Onh0ObP6e+D/puQbsZ26qJ0d4hf2X4A0UZJULM9+abIyO8XXJeXpOurPWpiOvzbPRNWTs+dTtGbdmnIO",
- "eRSc1QV/9TpjRKv9p5g6TsH4xLbdevB2up3JNYi30fRI+QENeZnOzQAhVdsJb3VAdb4SGcFxmgJpjfTs",
- "vyMQVHv+vQKlY8lD+MEGdaHd0tx3bbFhAjzD2+IB+d6+BLsG0ip/g7e0uoqAK31rDepVmQuazbGQw/m3",
- "J2+IHdX2sY+d2GLHK7yktGfRsVcFtR+nhQf7d0viqQvT4YzHUptZK43VqJSmRRlLDjUtzn0DzEANbfh4",
- "fQmpc0BeB2862jxSA8Lww5LJwty4amhWd0GeMP/RmqZrvJK1ROowy0+v0u25UgUv49Uv3NQFEXHfGbxd",
- "oW5bp3tOhLk3b5iyD4DCFbTzUevkbGcS8Pmp7enJinPLKVHdY6x4wG3I7pGzgRrezB/FrEP4Gyrktsj9",
- "TYuWn2GvaIGmbgX03pN4NruxfrnEP+ycUi44S7E8Uuxodi+FTvGBTagk1TWy+i3udmhkc0Xrrtdhco6K",
- "g5XYvSB0hOsb4YOvZlEtd9g/NT5JuaaarEArJ9kgm/vnA5wdkHEFrsAlvisbyEkhW35FlJBRV3VSuzRu",
- "yEaYFjNwsfvOfHvrrv0YL37JOCr4jmwuNN1a6vAhQ21uBUyTlQDl5tPODVYfTJ8DTJPNYPvxwD98aKvB",
- "oFvOTNv6oPugTrxH2nmATdtXpq2rE1T/3IpAtoOelKUbdPhxiag+oLd8kMARz2LiXTsBcWv4IbQRdhsN",
- "JcHz1DAaXKEjGko8h3uMUT+00HnExyitlqOwBbEhXNEKBoxH0HjDODTPckYOiDR6JODC4H4d6KdSSbVV",
- "ASfJtHOgOXqfYwJNaed6uCuobi0hQxKcox9jeBmbNyIGBEfdoFHcKN/Vr4Ea7g6UiVf4DLEjZP/FB9Sq",
- "nBKVYUZB5w2ImOAwgtu/MtM+APrboK8T2e5aUrtzbnISDSWJLqpsBTqhWRarSPUNfiX41ReXgi2kVV2Y",
- "sixJijVR2kVi+tzmBkoFV1UxMpZvcMfhgkdVItwQPuziVxiTUBY7/DdWlXF4ZVwQxo3DAH3EhXuF4oZ6",
- "cxtST+s1PJ0otkqmUwLPlLuToxn6doze9L9XTs/Fqo3IZy4NMSblwjWKybdvzcERVk7olRq1R0td2ACD",
- "7oR/Cg+vjXVKblsq4VHWqz2Kzp76qa1xA8Two1lzPPwGQm+DghjUnq/WezgUgJsOxotT7TLXNCWjImgw",
- "G8hG79i8H8QibjkditixATvmc6/3NM2wp2cj7FGC+lCwPkI/+DhTUlLmXOONsOhT1kWkD5sLxzZds8Dd",
- "Sbg470GL3Q9XQzHZRDG+yoHg9+4zQ5fg0tnrd+btXH1Ukr8S2l/dM68WXh0VH51/PzoBh/qyZtBBo+25",
- "K2lvp+nu5D/8YmPYCHAtd/8CJtzeovceaepru9Y81TQhdTnkSeWRW6di/L2l4fpHTc0j5KdSKNaU4I49",
- "xDQx1u0c31IK6jf1YflAkytINdZdbxzoEuAm1ZzMYMEjf/+/DtLA3bEOCXTlj8ZqHvWLre850HppSUFq",
- "nS1UfTC9ws9JHSaFQgkr4K6Au3f22gkHk8Oel0tINbvakwb29zXwIMVo7o0Q9r3cICuM1WG0WEXk5ia2",
- "BqGxLK1RfIJqfndGZygJ5BJ2DxRpcUO0cvbcnyu3KSCBFEDpkBgWESoWhmCtps4zzFTNGUgFH/Zju0NT",
- "imvwzZ0gqfGWY3mWNCduk+g4MmT80Y9JY5muN0r/xYjQoUyx/qMBw8r2a3yjQdXv4fkCFOGVlJz2y/Rt",
- "XAELTNqrHQW+lAUo/5vP0LWj5OwSwleB0C2zoTLzLaJ2Bm/CSEbOo156ly9430V6WY/MmiDNfkJPpPAT",
- "huKmuTD6VzIUz9yOiwwfz8foD1vyGyM+DV5LkO71NFT2cqEg0cIHdY7hMUYK99D7bYigBostWuQGS6C8",
- "b2q8YNFZiiVPqItsCSdIJBTUYCeDSizDY44R+5X97jNYfNHRveaUml/3F5r34blM9YgYcv2SuNNyf2bM",
- "bSwrjHP7VquKlWXhhpSh6b+UIqtSe0CHG6O2Pk0uejQiSqJGibQ/y979MscSYG+CPMNL2B1a1d+X6vdL",
- "GWJvVSg7hyCvv7Pa92p0it+v85WdwOpe8PyShpv5rBQiTwZs/af96jLdPXDJ0kvIiDk7fGDbwLMl5CGa",
- "mGtn7ma989VUyhI4ZI8OCDnhNpTY+3Xb5Y07g/MHemz8LY6aVbbgk7MpHVzweEwmlmKSd5RvHsy4VFNg",
- "hN8dh7JA9tQu2Q5UtpF0E3nE52DqpbTvae0+rNIwlcUipqXsecIi4kX2byL4FzZ8xooWBUv7ryj0VIkl",
- "vkaV0Ajw01qAz1tvBbLOwx2+xpB9piGlVoEzlwfK8kqCyxywz+Z0yumXVK/98pnmfTXLHNmgMKzflmSn",
- "yl4K/OXEvdnT3ReiTHK4gpYjwaUzVGkKSrErCN/7sZ1JBlDiVb17gMQs5CFfdWSIm3sS2FinUDcqVCxh",
- "7UqRPRJj4DH2xLKHmspCBqMrllW0RT91h6dYJr7tHuI6cYfceHPEJ9fbGu65lKQu5hYzZLp0Er+Ehn+b",
- "p106ClLwBEsNc+AtypoKd1FGBkkbp+ztimhM4oe+TTuyZYJnV8YtL2GNnSZ4V1rXCN7U/K7rLumPzW6c",
- "9gCM77AHvdAgFzwB4zUhh84XjrD9sSZKMJVBTmhNf5+Nz02wEV/BElnZbaZpK57Z6Kz2ugQGXPWqtosO",
- "vcvUNZ9iQR3BschY3+yq0FWGtcpDxjGyW17R/PObTrHS0gnSw71zG59oaHsLiWxJqW4X5vaGTho7sLPd",
- "39D8HZp6/w5mjaI+TgfK+TxqXcF7hlBk0pzkonnhDkGSDcK0TtEnL8jCpeiUElKmWCd7cePLKNemJnxV",
- "oHn+eNy2tW+evwh9BzZeevWFvG1KsmqBJ0aDYbNFv7BQGdi5US6PcV+PLSL0i8mosFbGnuPisuUttSWu",
- "O2GAQsI9e02D+Kcbek37VUCmTs96Bs2hUynoz3Pyad2ibeSgbuY21eXfJ+5Y3c4pnvp4OV7THUMFLEGw",
- "ljVBVMlvT34jEpb4WI0gjx/jAI8fz13T3562P5vt/Phx/JnlzxUkYGnkYLhxYxzzy1DYuA2NHshQ6KxH",
- "xfJsH2O08k2a554wo+JXl3H2RR6c+tX6cvpb1T36cZPwpO4iIGEic20NHgwVZJJMSCJx3SIpI2gVSSvJ",
- "9A4L4XjTP/s1Gs7wfe0tdN7munSCO/u0uIS6lFLjW6yUP12/FzTH88jo1BgcpvFp3W+3tChzcBvl6weL",
- "v8Czvz7Pjp49+cvir0dfHaXw/KuXR0f05XP65OWzJ/D0r189P4InyxcvF0+zp8+fLp4/ff7iq5fps+dP",
- "Fs9fvPzLAyOHDMoW0ZlPu579b3yVLTl5d5qcG2QbmtCS1S9qGzb2T8vQFHciFJTls2P/0//0O+wgFUUD",
- "3v86c1mds7XWpTo+PNxsNgdhl8MVOhMSLap0fejH6b9k/O60zsyxV0tcUZt04U0GnhVO8Nv7b8/Oycm7",
- "04Pgpczj2dHB0cETfEixBE5LNjuePcOfcPescd0PHbPNjj9dz2eHa6A5+t7NHwVoyVL/SW3oagXywL2x",
- "Y366enroVYnDT86Rcj327TAsV334qeVvyvb0xHK2h598lZbx1q0yKM7PFnSYiMVYs8MFJn9ObQoqaDw8",
- "FbxgqMNPqCIP/n7oMuLiH/GqYvfAoXfKxlu2qPRJbw2unR7uSf7DT/gf5MkALRt/2kcXtiVIZpQs635e",
- "2ZJRNQufZrPj2bdBo1drSC+xQLI1MiBvPj06igTFB72I3Sp0kUNm+Pz50fMJHbjQYSdXyqPf8Wd+ycWG",
- "EwyhtHKzKgoqd6iP6EpyRX76gbAlge4QTPkRcK/SlUJTLVZjnc1nLfJ8vHZEs+l1h/YF0IaW/ucdT6M/",
- "9qnffYki9vPhp3Yl1BYXqnWlM7EJ+uLNxV67++PVbwO0/j7cUKaNLuLCEbBSTb+zBpofukSbzq9NbGvv",
- "CwbsBj+236uP/HpYFwKLfuxKiNhXt0MGGvk0Sf+50RDCE3d2/CE4az98vP5ovknTGj81B8jx4SG6+NZC",
- "6cPZ9fxT53AJP36secznH89Kya4wnPnj9f8LAAD//+4FyDz4swAA",
+ "H4sIAAAAAAAC/+x9/ZPbNrLgv4LSe1X+OFEafyRvPVWpdxM7yc7FcVyeSfbe8/gSiGxJ2CEBLgBqpPj8",
+ "v1+hAZAgCUrUzMTerbqf7BHx0Wg0Gv2Nj5NUFKXgwLWanH6clFTSAjRI/Iumqai4Tlhm/spApZKVmgk+",
+ "OfXfiNKS8dVkOmHm15Lq9WQ64bSApo3pP51I+EfFJGSTUy0rmE5UuoaCmoH1rjSt65G2yUokbogzO8T5",
+ "q8mnPR9olklQqg/lzzzfEcbTvMqAaEm5oqn5pMgN02ui10wR15kwTgQHIpZEr1uNyZJBnqmZX+Q/KpC7",
+ "YJVu8uElfWpATKTIoQ/nS1EsGAcPFdRA1RtCtCAZLLHRmmpiZjCw+oZaEAVUpmuyFPIAqBaIEF7gVTE5",
+ "fT9RwDOQuFspsA3+dykB/oBEU7kCPfkwjS1uqUEmmhWRpZ077EtQVa4Vwba4xhXbACem14z8VClNFkAo",
+ "J+++f0mePXv2wiykoFpD5ohscFXN7OGabPfJ6SSjGvznPq3RfCUk5VlSt3/3/Uuc/8ItcGwrqhTED8uZ",
+ "+ULOXw0twHeMkBDjGla4Dy3qNz0ih6L5eQFLIWHkntjG97op4fxfdFdSqtN1KRjXkX0h+JXYz1EeFnTf",
+ "x8NqAFrtS4MpaQZ9f5K8+PDxyfTJyad/e3+W/Lf786tnn0Yu/2U97gEMRBumlZTA012ykkDxtKwp7+Pj",
+ "naMHtRZVnpE13eDm0wJZvetLTF/LOjc0rwydsFSKs3wlFKGOjDJY0irXxE9MKp4bNmVGc9ROmCKlFBuW",
+ "QTY13PdmzdI1SamyQ2A7csPy3NBgpSAborX46vYcpk8hSgxct8IHLuifFxnNug5gArbIDZI0FwoSLQ5c",
+ "T/7GoTwj4YXS3FXquMuKXK6B4OTmg71sEXfc0HSe74jGfc0IVYQSfzVNCVuSnajIDW5Ozq6xv1uNwVpB",
+ "DNJwc1r3qDm8Q+jrISOCvIUQOVCOyPPnro8yvmSrSoIiN2vQa3fnSVCl4AqIWPwdUm22/X9d/PyGCEl+",
+ "AqXoCt7S9JoAT0UG2YycLwkXOiANR0uIQ9NzaB0Ortgl/3clDE0UalXS9Dp+o+esYJFV/US3rKgKwqti",
+ "AdJsqb9CtCASdCX5EEB2xAOkWNBtf9JLWfEU97+ZtiXLGWpjqszpDhFW0O03J1MHjiI0z0kJPGN8RfSW",
+ "D8pxZu7D4CVSVDwbIeZos6fBxapKSNmSQUbqUfZA4qY5BA/jx8HTCF8BOH6QQXDqWQ6Aw2EboRlzus0X",
+ "UtIVBCQzI7845oZftbgGXhM6WezwUylhw0Sl6k4DMOLU+yVwLjQkpYQli9DYhUOHYTC2jePAhZOBUsE1",
+ "ZRwyw5wRaKHBMqtBmIIJ9+s7/Vt8QRV8/Xzojm++jtz9peju+t4dH7Xb2CixRzJydZqv7sDGJatW/xH6",
+ "YTi3YqvE/tzbSLa6NLfNkuV4E/3d7J9HQ6WQCbQQ4e8mxVac6krC6RV/bP4iCbnQlGdUZuaXwv70U5Vr",
+ "dsFW5qfc/vRarFh6wVYDyKxhjSpc2K2w/5jx4uxYb6N6xWshrqsyXFDaUlwXO3L+amiT7ZjHEuZZre2G",
+ "isfl1isjx/bQ23ojB4AcxF1JTcNr2Ekw0NJ0if9sl0hPdCn/MP+UZW5663IZQ62hY3clo/nAmRXOyjJn",
+ "KTVIfOc+m6+GCYBVJGjTYo4X6unHAMRSihKkZnZQWpZJLlKaJ0pTjSP9u4Tl5HTyb/PG/jK33dU8mPy1",
+ "6XWBnYzIasWghJblEWO8NaKP2sMsDIPGT8gmLNtDoYlxu4mGlJhhwTlsKNezRmVp8YP6AL93MzX4ttKO",
+ "xXdHBRtEOLENF6CsBGwbPlAkQD1BtBJEKwqkq1ws6h8enpVlg0H8flaWFh8oPQJDwQy2TGn1CJdPm5MU",
+ "znP+akZ+CMdGUVzwfGcuBytqmLth6W4td4vVtiW3hmbEB4rgdgo5M1vj0WDE/PugOFQr1iI3Us9BWjGN",
+ "/+rahmRmfh/V+V+DxELcDhMXKloOc1bHwV8C5eZhh3L6hOPMPTNy1u17O7Ixo8QJ5la0snc/7bh78Fij",
+ "8EbS0gLovti7lHFU0mwjC+sduelIRheFOTjDAa0hVLc+awfPQxQSJIUODN/mIr3+K1XrezjzCz9W//jh",
+ "NGQNNANJ1lStZ5OYlBEer2a0MUfMNEQFnyyCqWb1Eu9reQeWllFNg6U5eONiiUU99kOmBzKiu/yM/6E5",
+ "MZ/N2Tas3w47I5fIwJQ9zs7JkBlt3yoIdibTAK0QghRWwSdG6z4KypfN5PF9GrVH31mbgtshtwjcIbG9",
+ "92PwrdjGYPhWbHtHQGxB3Qd9mHFQjNRQqBHwvXKQCdx/hz4qJd31kYxjj0GyWaARXRWeBh7e+GaWxjh7",
+ "thDydtynw1Y4aUzOhJpRA+Y77SAJm1Zl4kgxYrayDToDNV6+/UyjO3wMYy0sXGj6J2BBmVHvAwvtge4b",
+ "C6IoWQ73QPrrKNNfUAXPnpKLv5599eTpb0+/+tqQZCnFStKCLHYaFHnodDOi9C6HR/2VoXZU5To++tfP",
+ "vaGyPW5sHCUqmUJBy/5Q1gBqRSDbjJh2fay10YyrrgEcczgvwXByi3ZibfsGtFdMGQmrWNzLZgwhLGtm",
+ "yYiDJIODxHTs8pppduES5U5W96HKgpRCRuxreMS0SEWebEAqJiLelLeuBXEtvHhbdn+30JIbqoiZG02/",
+ "FUeBIkJZesvH83079OWWN7jZy/nteiOrc/OO2Zc28r0lUZESZKK3nGSwqFYtTWgpRUEoybAj3tE/gEZR",
+ "4JIVcKFpUf68XN6PqihwoIjKxgpQZiZiWxi5XkEquI2EOKCduVHHoKeLGG+i08MAOIxc7HiKdsb7OLbD",
+ "imvBODo91I6ngRZrYMwhW7XI8u7a6hA67FQPVAQcg47X+BkNHa8g1/R7IS8bS+APUlTlvQt53TnHLoe6",
+ "xThTSmb6eh2a8VXejr5ZGdhnsTV+kQW99MfXrQGhR4p8zVZrHagVb6UQy/uHMTZLDFD8YJWy3PTpq2Zv",
+ "RGaYia7UPYhgzWANhzN0G/I1uhCVJpRwkQFufqXiwtlAvAY6itG/rUN5T6+tnrUAQ10prcxqq5Kg97Z3",
+ "XzQdE5raE5ogatSA76p2OtpWdjobC5BLoNmOLAA4EQvnIHKuK1wkRdez9uKNEw0j/KIFVylFCkpBljjD",
+ "1EHQfDt7deg9eELAEeB6FqIEWVJ5Z2CvNwfhvIZdgoESijz88Vf16AvAq4Wm+QHEYpsYems133kB+1CP",
+ "m34fwXUnD8mOSiD+XiFaoDSbg4YhFB6Fk8H960LU28W7o2UDEv1xfyrF+0nuRkA1qH8yvd8V2qocCP9z",
+ "6q2R8MyGccqFF6xig+VU6eQQWzaNWjq4WUHACWOcGAceELxeU6WtD5nxDE1f9jrBeawQZqYYBnhQDTEj",
+ "/+o1kP7YqbkHuapUrY6oqiyF1JDF1sBhu2euN7Ct5xLLYOxa59GCVAoOjTyEpWB8hyy7EosgqmtXiwuy",
+ "6C8OHRLmnt9FUdkCokHEPkAufKsAu2EI1AAgTDWItoTDVIdy6rir6URpUZaGW+ik4nW/ITRd2NZn+pem",
+ "bZ+4qG7u7UyAwsgr195BfmMxa4Pf1lQRBwcp6LWRPdAMYp3dfZjNYUwU4ykk+ygfVTzTKjwCBw9pVa4k",
+ "zSDJIKe7/qC/2M/Eft43AO54o+4KDYmNYopvekPJPmhkz9ACx1Mx4ZHgF5KaI2hUgYZAXO8DI2eAY8eY",
+ "k6OjB/VQOFd0i/x4uGy71ZER8TbcCG123NEDguw4+hiAB/BQD317VGDnpNE9u1P8Fyg3QS1HHD/JDtTQ",
+ "Eprxj1rAgA3VBYgH56XD3jscOMo2B9nYAT4ydGQHDLpvqdQsZSXqOj/C7t5Vv+4EUTcjyUBTlkNGgg9W",
+ "DSzD/sTG33THvJ0qOMr21ge/Z3yLLCdnCkWeNvDXsEOd+60N7AxMHfehy0ZGNfcT5QQB9eFiRgQPm8CW",
+ "pjrfGUFNr2FHbkACUdWiYFrbgO22qqtFmYQDRP0ae2Z0TjwbFOl3YIxX8QKHCpbX34rpxOoE++G77CgG",
+ "LXQ4XaAUIh9hIeshIwrBqHgPUgqz68zFjvvoYU9JLSAd00YPbn39P1AtNOMKyH+JiqSUo8pVaahlGiFR",
+ "UEAB0sxgRLB6ThfZ0WAIcijAapL45fHj7sIfP3Z7zhRZwo1PuDANu+h4/BjtOG+F0q3DdQ/2UHPcziPX",
+ "Bzp8zMXntJAuTzkcWeBGHrOTbzuD114ic6aUcoRrln9nBtA5mdsxaw9pZFxUBY47ypcTDB1bN+77BSuq",
+ "nOr78FrBhuaJ2ICULIODnNxNzAT/bkPzn+tuB3S6JgqMFQVkjGrId6SUkIKNzjeimqrHnhEbt5euKV+h",
+ "hC5FtXKBY3Yc5LCVsrYQWfHeEFEpRm95glblGMd1wcI+QcPIL0CNDtU1SVuN4YbW87mcnDFXod+5iIk+",
+ "6pWaTgZVTIPUTaNiWuS0s0xGcN+WgBXgp5l4pO8CUWeEjT6+wm0x1Gs298+xkTdDx6DsTxyEsjUfh6LZ",
+ "jH6b7+5ByrADEQmlBIV3QmgXUvarWIYZZe7SUDuloeibzm3X3waO37tBBU3wnHFICsFhF02iZhx+wo/R",
+ "44T30kBnlBCG+naF/hb8HbDa84yhxrviF3e7e0K7LiL1vZD35YO0A46Wp0e4/A76t92Ut3VM0jyP+PJc",
+ "vkmXAahpnd/OJKFKiZShkHSeqak9aM7955JT2uh/W0fR3sPZ647bcVqFqYxolIW8JJSkOUOTreBKyyrV",
+ "V5yiUShYaiTayGu/w2bCl75J3C4ZMRu6oa44xUiz2lQUjZBYQsQu8j2AtxaqarUCpTvKxRLgirtWjJOK",
+ "M41zFea4JPa8lCAx5GdmWxZ0R5aGJrQgf4AUZFHptriN6VRKszx3HjQzDRHLK041yYEqTX5i/HKLw3kv",
+ "uz+yHPSNkNc1FuK3+wo4KKaSeFTUD/YrBqy65a9d8Cqmv9vP1udixm9yrnZoM2pSuv/Pw/88fX+W/DdN",
+ "/jhJXvyP+YePzz89etz78emnb775v+2fnn365tF//ntspzzssWQfB/n5K6eKnr9CfaNxuvRg/2wG94Lx",
+ "JEpkYfhEh7bIQ0xsdQT0qG2N0mu44nrLDSFtaM4yw1tuQw7dG6Z3Fu3p6FBNayM61ie/1iOl+DtwGRJh",
+ "Mh3WeGspqh9IGE+rQy+gy5TD87KsuN1KL33brBEf0CWW0zp10lZVOSWYV7emPhrR/fn0q68n0yYfrv4+",
+ "mU7c1w8RSmbZNpb1mME2ppy5A4IH44EiJd0p0HHugbBHY9dsMEU4bAFGq1drVn5+TqE0W8Q5nI/Fd0ae",
+ "LT/nNkjenB/0Ke6cq0IsPz/cWgJkUOp1rNpCS1DDVs1uAnTiPEopNsCnhM1g1jWyZEZfdFF0OdAlZv2j",
+ "9inGaEP1ObCE5qkiwHq4kFGWjBj9oMjjuPWn6cRd/ure1SE3cAyu7py1A9H/rQV58MN3l2TuGKZ6YBNw",
+ "7dBBymRElXZZQa0IIMPNbI0ZK+Rd8Sv+CpaMM/P99IpnVNP5giqWqnmlQH5Lc8pTmK0EOfWJRq+ople8",
+ "J2kNloEKUrxIWS1ylpLrUCFpyNOW9uiPcHX1nuYrcXX1oRcM0Vcf3FRR/mInSIwgLCqduMIEiYQbKmPO",
+ "JlUnpuPItvLIvlmtkC0qa5H0hQ/c+HGeR8tSdRNU+8svy9wsPyBD5dIvzZYRpYX0sogRUCw0uL9vhLsY",
+ "JL3xdpVKgSK/F7R8z7j+QJKr6uTkGZBWxubv7so3NLkrYbR1ZTCBtmtUwYVbtRK2WtKkpKuYT+vq6r0G",
+ "WuLuo7xcoI0jzwl2a2WK+kh4HKpZgMfH8AZYOI7OesPFXdhevghVfAn4CbcQ2xhxo/G033a/gtzRW29X",
+ "J/+0t0uVXifmbEdXpQyJ+52pa9OsjJDlwx8UW6G26sr4LICka0ivXX0VKEq9m7a6+wgbJ2h61sGUrbxj",
+ "M7+w9gN6BBZAqjKjThSnfNdNwlegtY/jfQfXsLsUTemIY7Lu20ngauigIqUG0qUh1vDYujG6m+/CuFCx",
+ "L0ufS41JdZ4sTmu68H2GD7IVee/hEMeIopWkPIQIKiOIsMQ/gIJbLNSMdyfSjy3PaBkLe/NFqvB43k9c",
+ "k0Z5chFX4WrQ6m6/F4BlvMSNIgtq5HbhKlDZROeAi1WKrmBAQg6dMiPTiVuOHBzk0L0XvenEsnuh9e6b",
+ "KMi2cWLWHKUUMF8MqaAy04mz8zNZv5/zTGBhSYewRY5iUh2QaJkOlS3nmK2UNwRanIBB8kbg8GC0MRJK",
+ "NmuqfHEsrCHmz/IoGeBPTNzfV67lPAgRCwqF1cVYPM/tntOedumKtvhKLb48S6hajii1YiR8jEqPbYfg",
+ "KABlkMPKLtw29oTSFBFoNsjA8fNymTMOJIlFmwVm0OCacXOAkY8fE2It8GT0CDEyDsBGfzYOTN6I8Gzy",
+ "1TFAclcEgfqx0RMe/A3xfC0bf21EHlEaFs4GvFqp5wDUhSjW91cnUBaHIYxPiWFzG5obNuc0vmaQXtUQ",
+ "FFs7NUJcRMWjIXF2jwPEXixHrcleRbdZTSgzeaDjAt0eiBdim9iEzajEu9guDL1HQ9IxfTR2MG19lgeK",
+ "LMQWo3TwarEh0AdgGYbDgxFo+FumkF6x39BtboHZN+1+aSpGhQpJxpnzanIZEifGTD0gwQyRy8Og5Mqt",
+ "AOgYO5r6xU75PaiktsWT/mXe3GrTppSYz/aJHf+hIxTdpQH89a0wdZGUt12JJWqnaAebtOvDBCJkjOgN",
+ "m+g7afquIAU5oFKQtISo5DrmOTW6DeCNc+G7BcYLrEJD+e5REMEkYcWUhsaI7uMkvoR5kmLxOyGWw6vT",
+ "pVya9b0Tor6mrBsRO7aW+dlXgCHASyaVTtADEV2CafS9QqX6e9M0Liu1Y6RsqViWxXkDTnsNuyRjeRWn",
+ "Vzfvj6/MtG9qlqiqBfJbxm3AygJLG0cjJ/dMbYNr9y74tV3wa3pv6x13GkxTM7E05NKe41/kXHQ47z52",
+ "ECHAGHH0d20QpXsYZJDx2ueOgdwU+Phn+6yvvcOU+bEPRu34vNuhO8qOFF1LYDDYuwqGbiIjljAdVAbu",
+ "p6IOnAFalizbdmyhdtRBjZkeZfDw9dQ6WMDddYMdwEBg94xlw0hQ7dJ5jYBvazy3KtfMRmHmsl3gLmQI",
+ "4VRM+RcK+oiqs+UO4eoSaP4j7H41bXE5k0/Tyd1MpzFcuxEP4Pptvb1RPKNr3prSWp6QI1FOy1KKDc0T",
+ "Z2AeIk0pNo40sbm3R39mVhc3Y15+d/b6rQP/03SS5kBlUosKg6vCduW/zKpslb6BA+IroBudz8vsVpQM",
+ "Nr8uLRYapW/W4EpJB9Jor+Zl43AIjqIzUi/jEUIHTc7ON2KXuMdHAmXtImnMd9ZD0vaK0A1lubebeWgH",
+ "onlwceMKp0a5QjjAnb0rgZMsuVd20zvd8dPRUNcBnhTOtafYdWHruSsieNeFjjHPu9J53QuKFSutVaTP",
+ "nHhVoCUhUTlL4zZWvlCGOLj1nZnGBBsPCKNmxIoNuGJ5xYKxTLMxNWk6QAZzRJGpomVxGtwthHurp+Ls",
+ "HxUQlgHX5pPEU9k5qFjexFnb+9epkR36c7mBrYW+Gf4uMkZYrbV74yEQ+wWM0FPXA/dVrTL7hdYWKfND",
+ "4JI4wuEfzti7Evc46x19OGq2wYvrtsctfFqnz/8MYdga64ff9fHKqysbOzBH9J0eppKlFH9AXM9D9TiS",
+ "aOTr0zKMcvkDwkSH8HWKFouprTvNc0PN7IPbPSTdhFaodpDCANXjzgduOSyU6S3UlNutts9mtGLd4gQT",
+ "RpXO7fgNwTiYe5G4Ob1Z0FgVUSNkGJjOGgdwy5auBfGdPe5VnW1hZyeBL7luy2wSeQmyyQHsF6S5pcBg",
+ "px0tKjSSAVJtKBNMrf8vVyIyTMVvKLevr5h+9ii53gqs8cv0uhESS0CouNk/g5QVNI9LDlnaN/FmbMXs",
+ "wyKVguDlCjeQfbTJUpF7/aPOIXKoOV+Sk2nwfI7bjYxtmGKLHLDFE9tiQRVy8toQVXcxywOu1wqbPx3R",
+ "fF3xTEKm18oiVglSC3Wo3tTOqwXoGwBOTrDdkxfkIbrtFNvAI4NFdz9PTp+8QKOr/eMkdgG4h2H2cZMM",
+ "2cnfHDuJ0zH6Le0YhnG7UWfRbHn7Mtww49pzmmzXMWcJWzped/gsFZTTFcQjRYoDMNm+uJtoSOvghWf2",
+ "WSOlpdgRpuPzg6aGPw1Enxv2Z8EgqSgKpgvn3FGiMPTUPEthJ/XD2TeSXEVhD5f/iD7S0ruIOkrk5zWa",
+ "2vsttmr0ZL+hBbTROiXU1v3IWRO94Ouck3NfVghLLNeVlS1uzFxm6SjmYDDDkpSScY2KRaWXyV9IuqaS",
+ "pob9zYbATRZfP4+UlW6XN+XHAf7Z8S5BgdzEUS8HyN7LEK4vecgFTwrDUbJHTbZHcCoHnblxt92Q73D/",
+ "0GOFMjNKMkhuVYvcaMCp70R4fM+AdyTFej1H0ePRK/vslFnJOHnQyuzQL+9eOymjEDJWK7A57k7ikKAl",
+ "gw3G7sU3yYx5x72Q+ahduAv0X9bz4EXOQCzzZzmmCHwrItqpL3VeW9JdrHrEOjB0TM0HQwYLN9SUtMtK",
+ "f34+ej9RUHFPlzds9x1b5ovHA/7RRcQXJhfcwMaXb1cyQChBWf0oyWT198DHTsm3YjuWcDqn0BPPPwGK",
+ "oiipWJ792mR+dl4tkJSn66jPbGE6/ta8r1Yvzt6B0bJ/a8o55NHhrLz5m5dLI5Lz38XYeQrGR7btPqRg",
+ "l9tZXAN4G0wPlJ/QoJfp3EwQYrWdVFcHbecrkRGcp6kx1xzX/gMcQZn0f1SgdCxBCT/YwDG0jRp2YKt0",
+ "E+AZaqQz8oN9QnkNpFVACDVBXyminTVdlbmg2RQrWFx+d/aa2FltH/tKkK0SvkJFqL2Kjk0sKJ85LgTZ",
+ "P/gTT48YP87+eG2zaqWTuqh3LAHVtGjKjrOOnwBVpBA7M/IqeAzV5qqaIQw9LJksjFZXj2blI6QJ8x+t",
+ "abpGta/FWodJfnx5e0+VKnhSsn4aqq4piefOwO0q3NsC91MijG5+w5R9ORc20M55rRPAndnB58C2lycr",
+ "zi2lzI645eoKksei3QNnr0jvSohC1kH8kUK/fR3i2Gr/F9grWuKq+3RA7y1Jm0FZP/njX0RPKRecpVhg",
+ "KnZFuyd2x/jZRtTi6hpy/RF3JzRyuKIPFtSheA6Lg08YeEboENc39AdfzaZa6rB/anzLdU01WYFWjrNB",
+ "NvXvbjhbI+MKXI1QfJA54JNCtnyXyCGj7vCkdpscSUaYejOgPH5vvr1xpgWMSb9mHJUIhzYn+FlrIL4A",
+ "qo3mwTRZCVBuPe38Y/Xe9JlhKm4G2w8z/2IojmFdf2bZ1s/dH+rMe72dl9m0fWnaugJJ9c+tKGc76VlZ",
+ "ukmHX2WJygN6ywcRHPFeJt59FCC3Hj8cbQ+57Q1XwfvUEBps0NkNJd7DPcKoXyjpvH5lhFZLUdiC2DCx",
+ "aJUExiNgvGYcmvdsIxdEGr0ScGPwvA70U6mk2oqAo3jaJdAcPdwxhqa0c2/cdahueSiDElyjn2N4G5vH",
+ "VQYYR92gEdwo39XP6BrqDoSJl/h+t0Nk/6kUlKqcEJVh1kLn8ZQY4zCM2z/P1L4A+segLxPZ7lpSe3KO",
+ "uYmGElEXVbYCndAsi5Vs/Ra/EvxKsgolB9hCWtWlPcuSpFh3pV2Ipk9tbqJUcFUVe+byDe44XfAaUYQa",
+ "wheR/A5jostih//G6loO74wL9Dg61NBHdWTHVV/qh07GpF5D04liq2Q8JvBOuTs6mqlvR+hN/3ul9Fys",
+ "2oB85vIT+7hcuEcx/vaduTjC6gy9Yq32aqmLJ2Bgn/BvSKLaWKf9trkSXmW96q3oUKrfqNtvgBh+bW6K",
+ "l99AeG9QdIPa+9V6KIeCfNPBmHSqXXacpmQvCxrMOLIRQja3CKGIW2eHooJsUJD53Os9TjLsydk6Xvgw",
+ "QKgPN+sD9KOPZSUlZc793jCLPmZd1Hs/D2FMPGyzwd1FuFjyQYvdj5uhuG9fjA2/d1+jugaXMl9K2DBR",
+ "ece2j3zyKqH9tfW2Ux15H11/3/CKU31Zc+ig8fbSvQpgl+l08h9/tXFyBLiWu38CU25v03vvXPWlXWue",
+ "apqQuqD0qALTrVtxTKHCWE08Jxu2Xto68E5Yj6xejREH+u9+TSfn2VEXZqyu4sSOEjt28Ve8hstONaWm",
+ "8IiVQrGmrnvsea+RIYaX+EJXUDarP5aP79lAqrGYfxO3IAGOKaJlJgseDP3/5acG1Ok6EtNVndpXaqpf",
+ "wf/AHd/LBgsyGm3189n4wkpndXQa8mmshrwC7t7sbOd5jI42Xy4h1WxzIPvub2vgQWbX1Ntl7NvbQTIe",
+ "q6OXsXjL8VbHBqB9yXF74QmKKN4ZnKHcm2vYPVCkRQ3RcuxTf9Xepm4HYgC5Q2JIRKhY9Ic1JDuHPFM1",
+ "ZSAWfLSV7Q5NBbTBl5yCXNJbzuVJ0lwcTX7pninjT8mMmst0PSrrGgNxhxL0+i9RDOsfr/DhD1W/sujr",
+ "foRaOjnvV0e8cXVDMFey9p34CiKg/G8+MdrOkrNrCN+aQk/VDZWZbxE1vXirTrLnPupl1flXFLpAL+uZ",
+ "WRMb28+jitTbwgjoNBdGjEiGwsjb4ah1LMcDZYNubPl3DLQ1cC1Bujf5UP7NhYJECx9Luw+OfaiwkUW3",
+ "QoIarHFpgRusPPOuKa2DtX4pVpqhLqAoXCCRUFADnQwK4AzPuQ/ZL+13nzjka70etDDV9Hr40QEfFc1U",
+ "D4kh1S+Juy0PJyTdxtjEOLfvPqtYNRwOsu0NKaXIqtRe0OHBqA1yo2tN7WElUTtN2l9lR0cIsjqvYTe3",
+ "SpB/rcHvYAi0lZws6EEVhc4m36v5TcXgXt0LeF/ScjWdlELkyYCz47xfwqdL8dcsvYaMmJvCRw8OvHxD",
+ "HqKNvfZm36x3vmRNWQKH7NGMkDNu47W9Y7tdQ7ozOX+g982/xVmzylbVcka12RWPB75ivSt5R27mh9nP",
+ "wxQYVnfHqewgBwrEbAfKB0l6E3kHajZWK++7mrtv8zREZaGIySTNszMH4mTqEJnm5Y8mTKYvHeS5uEmQ",
+ "ipK6/ldM5zDt2kzSVzxtuhlsLyCIt6HKXaA7sqYZSYWUkIY94ikOFqhCSEhygeE3Mc/gUht5qMC4Zk5y",
+ "sSKiNGquLaPnfSjRZ2mCuWyare2ZWEfNQCEDUC6t1k1jG/fn2fN6zfEv41yuI/YWRLTH8tHP3zhCOfrV",
+ "igDMEQR62NZ0Fnvdp72u7vtQQ6+1aVGwNI7uf60ok8HYkANvF0XWV5Oje1rJZwUO4Crqst3vIbXv0C3G",
+ "+knrmskjj0UAwLDntAXDKP/psWAs8V3HhEaQfF5LrdPWs7usc/Z9PTtL4ym1WusaiBm7kuCy1OwDdJ2X",
+ "c0qq1/4WM837uqXRU0BhCpl9/oMqawnxFhn3+l1XPBBlksMGWg5llzpXpSkoxTYQvpxnO5MMoET7ZFdq",
+ "jnlKQy7XEaXc2pPA1zYGu1HZyiLW7hQ5IDhFxbwtT+wxUWOPkoFow7KKtvCn7vAW2dAzZBE27GEdySmO",
+ "ZhLxxe1jEQdjG5Dmo+eSx0MbwszN2iiCs2W18dQSYXOyVUlv+LASEbE71f72u6+D4GBEdTKpB698We/K",
+ "bRXIQcrYRxi99wOjMocC//5rWPTEi1uub0TGsqYupiIDMNWcZ4zegyY6LGhW0B3J2HIJ0hrzlaY8ozIL",
+ "mzNOUpCaMqPZ7NTtxVoDraxgelCyNdwVB/UMJibjol3KApLvnMpwB6kTPTcRidNetVoMPZHY25V4OgHd",
+ "Guka46oGiMAlQqNsbQ+Y4CggkYJew5HzKPYH7J8Gy5M4258WOOuYKWK+1lvWVhvFuvthCJHbLXgMcb9n",
+ "KCy92OR0SRvNgpZkf0F2afyn5uIc9yyj73AAvNBhGDzM6G03DpwvnBz1U42UYCkfhiihtfxDPki3wEbS",
+ "CLbIMQKtwRbCtQH17X0JHMzqZe23HXpDtOvexTqLgttH/npuYcub7Kt9AeGYsyA3NP/8rl0swHmG+IDs",
+ "3bAxOPQNhki2qFS3y0x4TUfNHfgB729q/hZd0X8Ds0dRrdQN5USYWqz3wTx4s9DcGi6W/gmvDXByg2Pa",
+ "OLYnX5OFy9wuJaRMdUWjG/+6Ru0Kw8emXDbIVh/wvR1a569C34GMl17TIG+aSv2o4694A2FzRL8wUxk4",
+ "uVEqj1Ffjywi+IvxqLCE2oHr4roV4GZfPulkbggJ9xzoFoSsHxno1i8ON3Z5NpjLXDqVgv46R9/WLdxG",
+ "LupmbWOjNPvI3VfOfUxwZfyVBtMdozstQvCJE4Kgkt+f/E4kLPENQ0EeP8YJHj+euqa/P21/Nsf58eOo",
+ "dPbZ4jotjtwYbt4Yxfw6lOlns9kGkko7+1GxPDtEGK0U4eYVUEyC/c0VIvgi75D+ZmNN+kfVvQV3hwA5",
+ "i5jIWluTB1MFyb8j8n5dt0iWL/px0koyvcP6iN5+wH6LRqD+UEczuWi4Wj90d58W11BX2Gxinyrlb9cf",
+ "BM3xPrJqKze3kMhn5LstLcoc3EH55sHiP+DZX55nJ8+e/MfiLydfnaTw/KsXJyf0xXP65MWzJ/D0L189",
+ "P4Eny69fLJ5mT58/XTx/+vzrr16kz54/WTz/+sV/PDB8yIBsAZ34ajyT/42P9SZnb8+TSwNsgxNash9h",
+ "Z98FNGTsXxykKZ5EKCjLJ6f+p//pT9gsFUUzvP914op9TNZal+p0Pr+5uZmFXeYrDHZItKjS9dzP03uS",
+ "8Oztee0lslYg3FGbJ+ute54UzvDbu+8uLsnZ2/NZ8F796eRkdjJ7gs+bl8BpySank2f4E56eNe773BHb",
+ "5PTjp+lkvgaaY2yg+aMALVnqP0mg2c79X93Q1QrkzD3DaH7aPJ17sWL+0QV9fNr3bR6+aDL/2IqNyQ70",
+ "xBcP5h99Ib/9rVuV8lxMUNBhJBT7ms0XWB9kbFNQQePhpaCyoeYfUVwe/H3uChrEP6LaYs/D3AeQxVu2",
+ "sPRRbw2snR4p1em6Kucf8T9InwFYNn1orrd8jraP+cfWatzn3mravzfdwxabQmTgARbLpS1Muu/z/KP9",
+ "N5gItiVIZgQ/G7Ln7Dz1sTrPJqeT74JGL9eQXuNbHtbIh+fl6clJJLcy6EXs8aWLHDJz9p6fPB/RgQsd",
+ "dnJV5/odf+HXXNxwgpk4lpdXRUHlDmUkXUmuyM8/ErYk0J2CKT8D8g+6UujwxocDJtNJCz0fPjmk2cjz",
+ "OVZT2jW49D/veBr9sb/N3UfTYj/PP7aL9rfoR60rnYmboC9qU9YU0J+vfsaq9ff8hjJt5CMXwolFFfud",
+ "NdB87vK1O782KVK9L5j3FfwYuiKiv87rmrXRj11OFfvqTupAI28Z9Z8bqSWUAian74P7//2HTx/MN2la",
+ "46fmUjudzzEsai2Unk8+TT92Lrzw44eaxnwZm0kp2Qaz4j58+n8BAAD//229Af14wQAA",
}
// GetSwagger returns the content of the embedded swagger specification file
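
The swaggerSpec variables in these generated files hold a base64-encoded, gzipped, JSON-marshaled Swagger object (as the comment above each blob states). As a reference sketch only, the decoding that GetSwagger implies can be reproduced with the standard library: join the string chunks, base64-decode, then gunzip. The decodeSpec name below is illustrative and is not claimed to match the generated helper's actual signature.

package spec

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

// decodeSpec reverses the swaggerSpec encoding: the string chunks are
// joined, base64-decoded, and gunzipped, yielding the JSON swagger document.
func decodeSpec(chunks []string) ([]byte, error) {
	raw, err := base64.StdEncoding.DecodeString(strings.Join(chunks, ""))
	if err != nil {
		return nil, fmt.Errorf("base64 decode: %w", err)
	}
	zr, err := gzip.NewReader(bytes.NewReader(raw))
	if err != nil {
		return nil, fmt.Errorf("gzip open: %w", err)
	}
	defer zr.Close()
	return io.ReadAll(zr) // the raw JSON specification content
}
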
diff --git a/daemon/algod/api/server/v2/generated/model/types.go b/daemon/algod/api/server/v2/generated/model/types.go
index 583705783..3589e9e11 100644
--- a/daemon/algod/api/server/v2/generated/model/types.go
+++ b/daemon/algod/api/server/v2/generated/model/types.go
@@ -106,12 +106,24 @@ const (
GetTransactionProofParamsFormatMsgpack GetTransactionProofParamsFormat = "msgpack"
)
+// Defines values for GetLedgerStateDeltaForTransactionGroupParamsFormat.
+const (
+ GetLedgerStateDeltaForTransactionGroupParamsFormatJson GetLedgerStateDeltaForTransactionGroupParamsFormat = "json"
+ GetLedgerStateDeltaForTransactionGroupParamsFormatMsgpack GetLedgerStateDeltaForTransactionGroupParamsFormat = "msgpack"
+)
+
// Defines values for GetLedgerStateDeltaParamsFormat.
const (
GetLedgerStateDeltaParamsFormatJson GetLedgerStateDeltaParamsFormat = "json"
GetLedgerStateDeltaParamsFormatMsgpack GetLedgerStateDeltaParamsFormat = "msgpack"
)
+// Defines values for GetTransactionGroupLedgerStateDeltasForRoundParamsFormat.
+const (
+ GetTransactionGroupLedgerStateDeltasForRoundParamsFormatJson GetTransactionGroupLedgerStateDeltasForRoundParamsFormat = "json"
+ GetTransactionGroupLedgerStateDeltasForRoundParamsFormatMsgpack GetTransactionGroupLedgerStateDeltasForRoundParamsFormat = "msgpack"
+)
+
// Defines values for GetPendingTransactionsParamsFormat.
const (
GetPendingTransactionsParamsFormatJson GetPendingTransactionsParamsFormat = "json"
@@ -400,6 +412,9 @@ type Box struct {
// Name \[name\] box name, base64 encoded
Name []byte `json:"name"`
+ // Round The round for which this information is relevant
+ Round uint64 `json:"round"`
+
// Value \[value\] box value, base64 encoded.
Value []byte `json:"value"`
}
@@ -524,6 +539,13 @@ type KvDelta struct {
// LedgerStateDelta Ledger StateDelta object
type LedgerStateDelta = map[string]interface{}
+// LedgerStateDeltaForTransactionGroup Contains a ledger delta for a single transaction group
+type LedgerStateDeltaForTransactionGroup struct {
+ // Delta Ledger StateDelta object
+ Delta LedgerStateDelta `json:"Delta"`
+ Ids []string `json:"Ids"`
+}
+
// LightBlockHeaderProof Proof of membership and position of a light block header.
type LightBlockHeaderProof struct {
// Index The index of the light block header in the vector commitment tree
@@ -589,10 +611,10 @@ type PendingTransactionResponse struct {
// InnerTxns Inner transactions produced by application execution.
InnerTxns *[]PendingTransactionResponse `json:"inner-txns,omitempty"`
- // LocalStateDelta \[ld\] Local state key/value changes for the application being executed by this transaction.
+ // LocalStateDelta Local state key/value changes for the application being executed by this transaction.
LocalStateDelta *[]AccountStateDelta `json:"local-state-delta,omitempty"`
- // Logs \[lg\] Logs for the application being executed by this transaction.
+ // Logs Logs for the application being executed by this transaction.
Logs *[][]byte `json:"logs,omitempty"`
// PoolError Indicates that the transaction was kicked out of this node's transaction pool (and specifies why that happened). An empty string indicates the transaction wasn't kicked out of this node's txpool due to an error.
@@ -608,8 +630,35 @@ type PendingTransactionResponse struct {
Txn map[string]interface{} `json:"txn"`
}
+// SimulateRequest Request type for simulation endpoint.
+type SimulateRequest struct {
+ // AllowEmptySignatures Allow transactions without signatures to be simulated as if they had correct signatures.
+ AllowEmptySignatures *bool `json:"allow-empty-signatures,omitempty"`
+
+ // AllowMoreLogging Lifts limits on log opcode usage during simulation.
+ AllowMoreLogging *bool `json:"allow-more-logging,omitempty"`
+
+ // ExtraOpcodeBudget Applies extra opcode budget during simulation for each transaction group.
+ ExtraOpcodeBudget *uint64 `json:"extra-opcode-budget,omitempty"`
+
+ // TxnGroups The transaction groups to simulate.
+ TxnGroups []SimulateRequestTransactionGroup `json:"txn-groups"`
+}
+
+// SimulateRequestTransactionGroup A transaction group to simulate.
+type SimulateRequestTransactionGroup struct {
+ // Txns An atomic transaction group.
+ Txns []json.RawMessage `json:"txns"`
+}
+
// SimulateTransactionGroupResult Simulation result for an atomic transaction group
type SimulateTransactionGroupResult struct {
+ // AppBudgetAdded Total budget added during execution of app calls in the transaction group.
+ AppBudgetAdded *uint64 `json:"app-budget-added,omitempty"`
+
+ // AppBudgetConsumed Total budget consumed during execution of app calls in the transaction group.
+ AppBudgetConsumed *uint64 `json:"app-budget-consumed,omitempty"`
+
// FailedAt If present, indicates which transaction in this group caused the failure. This array represents the path to the failing transaction. Indexes are zero based, the first element indicates the top-level transaction, and successive elements indicate deeper inner transactions.
FailedAt *[]uint64 `json:"failed-at,omitempty"`
@@ -622,13 +671,31 @@ type SimulateTransactionGroupResult struct {
// SimulateTransactionResult Simulation result for an individual transaction
type SimulateTransactionResult struct {
- // MissingSignature A boolean indicating whether this transaction is missing signatures
- MissingSignature *bool `json:"missing-signature,omitempty"`
+ // AppBudgetConsumed Budget used during execution of an app call transaction. This value includes budget used by inner app calls spawned by this transaction.
+ AppBudgetConsumed *uint64 `json:"app-budget-consumed,omitempty"`
+
+ // LogicSigBudgetConsumed Budget used during execution of a logic sig transaction.
+ LogicSigBudgetConsumed *uint64 `json:"logic-sig-budget-consumed,omitempty"`
// TxnResult Details about a pending transaction. If the transaction was recently confirmed, includes confirmation details like the round and reward details.
TxnResult PendingTransactionResponse `json:"txn-result"`
}
+// SimulationEvalOverrides The set of parameters and limits overridden during simulation. If this set of parameters is present, then evaluation parameters may differ from standard evaluation in certain ways.
+type SimulationEvalOverrides struct {
+ // AllowEmptySignatures If true, transactions without signatures are allowed and simulated as if they were properly signed.
+ AllowEmptySignatures *bool `json:"allow-empty-signatures,omitempty"`
+
+ // ExtraOpcodeBudget The extra opcode budget added to each transaction group during simulation
+ ExtraOpcodeBudget *uint64 `json:"extra-opcode-budget,omitempty"`
+
+ // MaxLogCalls The maximum number of log calls one can make during simulation
+ MaxLogCalls *uint64 `json:"max-log-calls,omitempty"`
+
+ // MaxLogSize The maximum total number of bytes that may be logged during simulation
+ MaxLogSize *uint64 `json:"max-log-size,omitempty"`
+}
+
// StateDelta Application state delta.
type StateDelta = []EvalDeltaKeyValue
@@ -862,12 +929,21 @@ type DryrunResponse struct {
Txns []DryrunTxnResult `json:"txns"`
}
+// GetBlockTimeStampOffsetResponse defines model for GetBlockTimeStampOffsetResponse.
+type GetBlockTimeStampOffsetResponse struct {
+ // Offset Timestamp offset in seconds.
+ Offset uint64 `json:"offset"`
+}
+
// GetSyncRoundResponse defines model for GetSyncRoundResponse.
type GetSyncRoundResponse struct {
// Round The minimum sync round for the ledger.
Round uint64 `json:"round"`
}
+// LedgerStateDeltaForTransactionGroupResponse Ledger StateDelta object
+type LedgerStateDeltaForTransactionGroupResponse = LedgerStateDelta
+
// LedgerStateDeltaResponse Ledger StateDelta object
type LedgerStateDeltaResponse = LedgerStateDelta
@@ -942,7 +1018,7 @@ type NodeStatusResponse struct {
// UpgradeNodeVote This node's upgrade vote
UpgradeNodeVote *bool `json:"upgrade-node-vote,omitempty"`
- // UpgradeVoteRounds Total voting ounds for current upgrade
+ // UpgradeVoteRounds Total voting rounds for current upgrade
UpgradeVoteRounds *uint64 `json:"upgrade-vote-rounds,omitempty"`
// UpgradeVotes Total votes cast for consensus upgrade
@@ -984,6 +1060,9 @@ type PostTransactionsResponse struct {
// SimulateResponse defines model for SimulateResponse.
type SimulateResponse struct {
+ // EvalOverrides The set of parameters and limits overridden during simulation. If this set of parameters is present, then evaluation parameters may differ from standard evaluation in certain ways.
+ EvalOverrides *SimulationEvalOverrides `json:"eval-overrides,omitempty"`
+
// LastRound The round immediately preceding this simulation. State changes through this round were used to run this simulation.
LastRound uint64 `json:"last-round"`
@@ -992,9 +1071,6 @@ type SimulateResponse struct {
// Version The version of this response object.
Version uint64 `json:"version"`
-
- // WouldSucceed Indicates whether the simulated transactions would have succeeded during an actual submission. If any transaction fails or is missing a signature, this will be false.
- WouldSucceed bool `json:"would-succeed"`
}
// StateProofResponse Represents a state proof and its corresponding message
@@ -1012,6 +1088,11 @@ type SupplyResponse struct {
TotalMoney uint64 `json:"total-money"`
}
+// TransactionGroupLedgerStateDeltasForRoundResponse defines model for TransactionGroupLedgerStateDeltasForRoundResponse.
+type TransactionGroupLedgerStateDeltasForRoundResponse struct {
+ Deltas []LedgerStateDeltaForTransactionGroup `json:"Deltas"`
+}
+
// TransactionParametersResponse TransactionParams contains the parameters that help a client construct
// a new transaction.
type TransactionParametersResponse struct {
@@ -1150,6 +1231,15 @@ type GetTransactionProofParamsHashtype string
// GetTransactionProofParamsFormat defines parameters for GetTransactionProof.
type GetTransactionProofParamsFormat string
+// GetLedgerStateDeltaForTransactionGroupParams defines parameters for GetLedgerStateDeltaForTransactionGroup.
+type GetLedgerStateDeltaForTransactionGroupParams struct {
+ // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.
+ Format *GetLedgerStateDeltaForTransactionGroupParamsFormat `form:"format,omitempty" json:"format,omitempty"`
+}
+
+// GetLedgerStateDeltaForTransactionGroupParamsFormat defines parameters for GetLedgerStateDeltaForTransactionGroup.
+type GetLedgerStateDeltaForTransactionGroupParamsFormat string
+
// GetLedgerStateDeltaParams defines parameters for GetLedgerStateDelta.
type GetLedgerStateDeltaParams struct {
// Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.
@@ -1159,6 +1249,15 @@ type GetLedgerStateDeltaParams struct {
// GetLedgerStateDeltaParamsFormat defines parameters for GetLedgerStateDelta.
type GetLedgerStateDeltaParamsFormat string
+// GetTransactionGroupLedgerStateDeltasForRoundParams defines parameters for GetTransactionGroupLedgerStateDeltasForRound.
+type GetTransactionGroupLedgerStateDeltasForRoundParams struct {
+ // Format Configures whether the response object is JSON or MessagePack encoded. If not provided, defaults to JSON.
+ Format *GetTransactionGroupLedgerStateDeltasForRoundParamsFormat `form:"format,omitempty" json:"format,omitempty"`
+}
+
+// GetTransactionGroupLedgerStateDeltasForRoundParamsFormat defines parameters for GetTransactionGroupLedgerStateDeltasForRound.
+type GetTransactionGroupLedgerStateDeltasForRoundParamsFormat string
+
// ShutdownNodeParams defines parameters for ShutdownNode.
type ShutdownNodeParams struct {
Timeout *uint64 `form:"timeout,omitempty" json:"timeout,omitempty"`
@@ -1208,3 +1307,6 @@ type TealCompileTextRequestBody = TealCompileTextBody
// TealDryrunJSONRequestBody defines body for TealDryrun for application/json ContentType.
type TealDryrunJSONRequestBody = DryrunRequest
+
+// SimulateTransactionJSONRequestBody defines body for SimulateTransaction for application/json ContentType.
+type SimulateTransactionJSONRequestBody = SimulateRequest
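
Taken together, the new model types above describe the JSON body for the simulation endpoint. Below is a hedged client-side sketch of how one might be assembled; the import path matches this diff, while the transaction bytes, field values, and the surrounding main function are purely illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
)

func main() {
	allowEmpty := true          // simulate unsigned txns as if correctly signed
	extraBudget := uint64(2000) // hypothetical per-group opcode budget top-up

	// Stand-in for an encoded signed transaction; a real client would place
	// its own serialized transaction(s) here.
	txn := json.RawMessage(`{"txn":{"type":"pay"}}`)

	req := model.SimulateRequest{
		AllowEmptySignatures: &allowEmpty,
		ExtraOpcodeBudget:    &extraBudget,
		TxnGroups: []model.SimulateRequestTransactionGroup{
			{Txns: []json.RawMessage{txn}}, // one atomic group
		},
	}

	// SimulateTransactionJSONRequestBody is an alias for SimulateRequest,
	// so this is the application/json body the endpoint expects.
	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}

The response would unmarshal into model.SimulateResponse, whose EvalOverrides field echoes whichever of these knobs the node actually applied.
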
diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go
index 5bf532b49..41fef3739 100644
--- a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go
@@ -130,171 +130,181 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9/XPcNrLgv4KafVX+uKEkf+5aVal3ip1kdXEcl6Vk7z3bl2DInhmsOABDgNJMfPrf",
- "X6EbIEESnOFIir2p2p9sDYFGo9FoNPoLnyapWhVKgjR6cvxpUvCSr8BAiX/xNFWVNInI7F8Z6LQUhRFK",
- "To79N6ZNKeRiMp0I+2vBzXIynUi+gqaN7T+dlPBbJUrIJsemrGA60ekSVtwCNpvCtq4hrZOFShyIEwJx",
- "+mpyveUDz7IStO5j+aPMN0zINK8yYKbkUvPUftLsSpglM0uhmevMhGRKAlNzZpatxmwuIM/0gZ/kbxWU",
- "m2CWbvDhKV03KCalyqGP50u1mgkJHiuokaoXhBnFMphjoyU3zI5gcfUNjWIaeJku2VyVO1AlJEJ8QVar",
- "yfH7iQaZQYmrlYK4xP/OS4DfITG8XICZfJzGJjc3UCZGrCJTO3XUL0FXudEM2+IcF+ISJLO9DtgPlTZs",
- "BoxL9u7bl+zJkycv7ERW3BjIHJMNzqoZPZwTdZ8cTzJuwH/u8xrPF6rkMkvq9u++fYnjn7kJjm3FtYb4",
- "ZjmxX9jpq6EJ+I4RFhLSwALXocX9tkdkUzQ/z2CuShi5JtT4ThclHP+LrkrKTboslJAmsi4MvzL6HJVh",
- "QfdtMqxGoNW+sJQqLdD3R8mLj58eTR8dXf/l/Uny3+7PZ0+uR07/ZQ13BwWiDdOqLEGmm2RRAsfdsuSy",
- "T493jh/0UlV5xpb8Ehefr1DUu77M9iXRecnzyvKJSEt1ki+UZtyxUQZzXuWG+YFZJXMrpiw0x+1MaFaU",
- "6lJkkE2t9L1ainTJUq4JBLZjVyLPLQ9WGrIhXovPbstmug5JYvG6ET1wQv+6xGjmtYMSsEZpkKS50pAY",
- "teN48icOlxkLD5TmrNL7HVbsfAkMB7cf6LBF2knL03m+YQbXNWNcM8780TRlYs42qmJXuDi5uMD+bjaW",
- "aitmiYaL0zpH7eYdIl+PGBHizZTKgUsknt93fZLJuVhUJWh2tQSzdGdeCbpQUgNTs39Cauyy/5+zH98w",
- "VbIfQGu+gLc8vWAgU5VBdsBO50wqE7CG4yWkoe05NA+HV+yQ/6dWlidWelHw9CJ+oudiJSKz+oGvxapa",
- "MVmtZlDaJfVHiFGsBFOVcgghgriDFVd83R/0vKxkiuvfDNvS5Sy3CV3kfIMEW/H1V0dTh45mPM9ZATIT",
- "csHMWg7qcXbs3eglpapkNkLNMXZNg4NVF5CKuYCM1VC2YOKG2YWPkPvh0yhfAToeyCA69Sg70JGwjvCM",
- "3d32Cyv4AgKWOWA/OeGGX426AFkzOptt8FNRwqVQla47DeCIQ2/XwKUykBQlzEWEx84cOayAoTZOAq+c",
- "DpQqabiQkFnhjEgrAySsBnEKBtx+3+mf4jOu4fnToTO++Tpy9eequ+pbV3zUamOjhLZk5Oi0X92GjWtW",
- "rf4j7ofh2FosEvq5t5BicW5Pm7nI8ST6p10/T4ZKoxBoEcKfTVosJDdVCccf5EP7F0vYmeEy42Vmf1nR",
- "Tz9UuRFnYmF/yumn12oh0jOxGCBmjWv0woXdVvSPhRcXx2YdvVe8VuqiKsIJpa2L62zDTl8NLTLB3Jcx",
- "T+rbbnjxOF/7y8i+Pcy6XsgBJAdpV3Db8AI2JVhseTrHf9Zz5Cc+L3+3/xRFbnubYh4jreVjdySj+cCZ",
- "FU6KIhcpt0R85z7br1YIAF0keNPiEA/U408BikWpCiiNIKC8KJJcpTxPtOEGIf1HCfPJ8eQvh4395ZC6",
- "68Ng8Ne21xl2siorqUEJL4o9YLy1qo/eIiysgMZPKCZI7KHSJCQtomUlYUVwDpdcmoPmytKSB/UGfu9G",
- "auhN2g7Ru3MFGyQ4o4Yz0KQBU8N7mgWkZ0hWhmRFhXSRq1n9w/2TomgoiN9PioLogdojCFTMYC200Q9w",
- "+rzZSeE4p68O2HchbFTFlcw39nAgVcOeDXN3arlTrLYtuTk0EO9phsupygO7NJ4MVs2/C47Da8VS5Vbr",
- "2ckrtvHfXduQzezvozr/OVgspO0wc+FFy1GO7jj4S3C5ud/hnD7jOHPPATvp9r0Z21gocYa5Ea9sXU+C",
- "u4WONQmvSl4Qgu4LnaVC4iWNGhGut5SmIwVdFOdgDwe8hljdeK/t3A9RTJAVOjh8nav04u9cL+9gz888",
- "rP72w2HYEngGJVtyvTyYxLSMcHs10MZsMdsQL/hsFgx1UE/xrqa3Y2oZNzyYmsM3rpYQ6bEfCj0oI3eX",
- "H/E/PGf2s93bVvQT2AN2jgJM03Z2TobM3vbpgkAj2QZohVBsRRd8Zm/de2H5shk8vk6j1ugbsim4FXKT",
- "wBVS6zvfBl+rdQyHr9W6twXUGvRd8IeFg2qkgZUegd8rh5nC9Xfk42XJN30iI+wxRLYTtKqrxt0gwxPf",
- "jtIYZ09mqryZ9OmIFckakzPjFmogfKcdImHTqkgcK0bMVtSgA6jx8m0XGl3wMYq1qHBm+B9ABW2h3gUV",
- "2oDumgpqVYgc7oD1l1GhP+ManjxmZ38/efbo8S+Pnz23LFmUalHyFZttDGh2393NmDabHB70Z4a3oyo3",
- "cejPn3pDZRtuDI5WVZnCihd9UGQAJRWImjHbrk+1Nplx1jWCYzbnOVhJTmRnZNu3qL0S2mpYq9mdLMYQ",
- "wbJmlIw5TDLYyUz7Tq8ZZhNOsdyU1V1cZaEsVRmxr+EWMypVeXIJpRYq4k1561ow18Krt0X3d8KWXXHN",
- "7Nho+q0kKhQRzjJrOV7uE+jztWxos1Xy03wjs3PjjlmXNvG9JVGzAsrErCXLYFYtWjehealWjLMMO+IZ",
- "/R2Ys41M0ap2F0w6fE1bCYkmfr2RaXBnswuVQ7ZoLcLt72Zdqnj7HA11T0fQseR4jZ/xWv8KcsPvXH/p",
- "DhDD/aVfSEKWZbYh3oJfi8XSBArm21Kp+d3jGBslhih+IPU8t336SvoblYGdbKXv4DBugDW8btc05HA+",
- "U5VhnEmVAVpUKh0/pgc89+gyRE+nCU9+sySNewaWkVJe2dlWBUM/Xk9yNB0TnhL3JkgaPeDFqN1P1IqG",
- "I69wXgLP7K0eJFMz5ypwTgycJEcnpPEHnVMSInuphVdRqhS0hixxJoqdqPl2JETMFjoh4ohwPQrTis15",
- "eWtkLy534nkBmwRd5prd//5n/eAL4GuU4fkOwmKbGHnrC5/zB/WxHjf8NobrDh6yHS+BeZlrb5dWQORg",
- "YIiEe9FkcP26GPVW8fZkuYQSPTN/KMf7QW7HQDWqfzC/3xbbqhgIBHMXnXOxQrud5FJpSJXMdBRYzrVJ",
- "doll26h1G7MzCCRhTBIj4AGl5DXXhryJQmZoBKHjBMchBcUOMYzwoEJqIf/sddE+7NSeg1JXulZMdVUU",
- "qjSQxeYgYb1lrDewrsdS8wB2rf0axSoNuyAPUSmA74hFMyECcVMb3Z27vT85NE3bc34TJWULiYYQ2xA5",
- "860C6obBMAOICN0QmhhH6A7n1BE404k2qiistDBJJet+Q2Q6o9Yn5qembZ+5uGnO7UyBxhgc195hfkWU",
- "pTCoJbdXaITMVvzC6h54ISa3Zx9nuxkTLWQKyTbOt9vyzLYKt8DOTVoVi5JnkGSQ800f6E/0mdHnbQBw",
- "xZuLjzKQUDxLfNEbTvbhA1tAK4SnY8ojwy8stVvQ3jwaBnG9d0DOAGHHhJPjo3s1KBwrukQeHk6bljoC",
- "EU/DS2XsihM7IMZOoI/Bd4AMNeSbUwI7J821rDvEf4F2A9RqxP6DbEAPTaGBv9cEBoxpLlI42C4d6d4R",
- "wFGpOSjFdoiRoR07YNl7y0sjUlHgVed72Nz5za87QNTfxDIwXOSQseAD3QKLsD+jQIwuzJvdBEcZYfro",
- "96wwkenkQqPG00b+AjZ45X5LEX7nQVzgHVxlI1Dt8cQlQ0R93JDVwMMmsOapyTdWTzNL2LArKIHparYS",
- "xlDkbvuma1SRhACiBu4tIzpvDkXH+RUY4146Q1DB9PpLMZ3QlWA7fuede0GLHO4qUCiVjzAe9YgRxWCU",
- "458Vyq66cEHEPozUc1ILSSe00ZVXn/73dIvMOAP2X6piKZd446oM1CqNKlFPQP3RjmA1sHpM5+JvKAQ5",
- "rIAukvjl4cPuxB8+dGsuNJvDlY+8tw275Hj4EM04b5U2rc11B6ZCu91OI8cHWv7x3HPBCx2ZstvF7CCP",
- "Wcm3HeC1u8DuKa0d49rp31oAdHbmeszcQx4Z515HuKOM+gHo2Lxx3c/Eqsq5uQv3xVZ9tL5PiNUKMsEN",
- "5BtWlJACRVdbBUsTLhY1RnFX6ZLLBerVpaoWLvCH4KBgrDRZMMpK9kBElQ+zlsmiVFURE5Qu2NMH2Fu1",
- "A7i9+QSExM6k51/xejyXUzHmBPMED1bnOwtzyKswnQxeDC1RL5uLIRGnnSUQpwKmPSS6SlOAaAhw7MpV",
- "T7WTDdnktziAVm2oSoqBYjw1Fc9DrmOnc8blpp0myUWurRQUmmE727mJq53S3HwOy5zn5JuNJFWEO6Wl",
- "8QUr35C0S4qRfgdkEqsN9TkjZEC7vSwb/zE2/AZ0DMv+wEHQVfNxKO7K3r/zzR2oQQSIlVCUoPHQCu1W",
- "mr6qeZj75E41vdEGVn3TPnX9ZUDQvBu8QCqZCwnJSknYRNN9hYQf8GNUcODBOdAZVZihvt1bSQv/Dlrt",
- "ccZw423pi6sdyKK3dcDhHSx+F27HqxNmfaHVEvKCcZbmAm2aSmpTVqn5IDlaTYLNFgnM8PfDYTvaS98k",
- "briL2NUcqA+SY1BObUuJOpPnEDEcfAvgzWm6WixAd+QnmwN8kK6VkKySwuBYK7teCS1YASVGRxxQyxXf",
- "WBGIZr/foVRsVpm2TMbME22suCQXkx2GqfkHyQ3Lwd6pfxDyfI3gvIvW84wEc6XKi5oK8SNkARK00Ek8",
- "gOQ7+oqxfW76Sxfnh5nC9JmcEhZ+k56yQaNKk/36/+7/5/H7k+S/efL7UfLifx1+/PT0+sHD3o+Pr7/6",
- "6v+3f3py/dWD//yP2Ep53GN5EQ7z01fusnb6CjXyxivRw/2zWaRXQiZRJgt97x3eYvcxB9Ax0IO2vcYs",
- "4YM0a2kZ6ZLnIrMq103YoSvienuRdkeHa1oL0bHP+LnuqefeQsqwiJDpiMYbH+P9mKt4BhK6yVxSEe6X",
- "eSVpKb2iSwH2PvZFzad1lhkVoDhmmIK05D5wy/35+NnzybRJHaq/T6YT9/VjhJNFto5qh7COXV/cBsGN",
- "cU+zgm80DCigiHs0zIeiDUKwK7D3Xr0UxeeXFNqIWVzC+bBlZwZZy1NJ8cR2/6DTbeNs+Wr++fE2pdXD",
- "C7OMJaa3NAVs1awmQCcQoijVJcgpEwdw0DVDZPZq5gKOcuBzTJDGi54ak4ZR7wNiNM8VAdXDiYy668f4",
- "B5VbJ62vpxN3+Os718cd4Bhe3TFrD5v/2yh277tvztmhE5j6HuUqEugguyxya3UJFK0QGSvNqBwHJWt+",
- "kB/kK5gLKez34w8y44YfzrgWqT6sNJRf85zLFA4Wih37nIxX3PAPsqdpDVbMCbJhWFHNcpGyi1AjbtiT",
- "qiD0IXz48J7nC/Xhw8detEBff3VDReULDZBcCbNUlUlcDndSwhUvY94YXefwImQq0rBt1ClzsEkUuxxx",
- "Bz8u83hR6G4uX3/6RZHb6QdsqF2mml0ypo0qvS5iFRTCBtf3jXIHQ8mvvAmj0qDZrytevBfSfGTJh+ro",
- "6AmwVnLbr+7Itzy5KWC0IWMw17Brv8CJ070G1qbkScEXMa/Phw/vDfACVx/15RVesvOcYbdWUp0PGkZQ",
- "zQQ8PYYXgPDYO0EIJ3dGvXy9nvgU8BMuIbax6kbjir7pegVpdjderk6qXm+VKrNM7N6OzkpbFvcrU5fx",
- "WFgly8cHaLHAGExX8WQGLF1CeuFKUcCqMJtpq7sPQXGKphcdQlOREkqSwTR5tJnPgFVFxp0q3rUgzTZM",
- "gzE+CPQdXMDmXDVZ9vskKLfzZfXQRkVODbRLy6zhtnUwuovv4pzQxFUUPu0U8488WxzXfOH7DG9kUnnv",
- "YBPHmKKVzzlECF5GCEHMP0CCG0zUwrsV68emZ28ZMzr5IgVLvOxnrklzeXIhSeFs0MBN31eAFY/UlWYz",
- "bvV25Yr1UE5oIMUqzRcwoCGHbouRmZctVwcC2XXuRU86Ne8eaL3zJooyNU7snKOcAvaLZRW8zHQC0fxI",
- "5BlzTgCswecINstRTaoj9kjo8LLlPqKiYkOoxRkYStkoHB6NNkVCzWbJta8jhOWW/F4epQP8gTnO2ypb",
- "hAb9oKZSbV/3Mre7T3u3S1ffwhe18JUswqvliKoUVsPHsO3YciiJClAGOSxo4tTYM0qTb90skMXjx/k8",
- "FxJYEgvH4lqrVFAhqOaYcWOA1Y8fMkYmYDYaQoyNA7TR44uA2RsV7k252AdJ6fLFuYeNvuLgb4intlCA",
- "slV5VGFFuBhwIKVeAnAXw1efX51IUgTDhJwyK+YueW7FnLvxNUB6BRZQbe2UU3AxBw+G1NktFng6WPaa",
- "Ex1FN5lNqDN5pOMK3RaMZ2qdUG5bVOOdrWeW36Mx25hpF9uYVMrinmYztcY4FjxaKEZ4By7DeHg0ghv+",
- "WmjkV+w3dJoTMtuG3a5NxbhQI8s4c17NLkPqxJihBzSYIXa5H1SnuBECHWNHU+rVXX53XlLb6kn/MG9O",
- "tWlTdcmnw8S2/9AWiq7SAP36Vpi6nsTbrsYStVO0wzHapTQCFTLG9FZM9J00fVeQhhzwUpC0lKjkIua6",
- "s3cbwBPnzHcLjBdYsIPLzYMgxqeEhdAGGiO6D0n4EuZJjnXClJoPz84U5dzO751S9TFFhWiwY2uan30G",
- "GCM7F6U2CXogolOwjb7VeKn+1jaN60rtKCKqqimyuGzAYS9gk2Qir+L86sb9/pUd9k0tEnU1Q3krJMWG",
- "zLAKbDS2cMvQFH66dcKvacKv+Z3Nd9xusE3twKVll/YYf5J90ZG828RBhAFjzNFftUGSbhGQQUpoXzoG",
- "ehNtTkwJPdhmfe1tpszD3hk24hNTh84oghSdS2Aw2DoLgW4iq5YIExRR7edqDuwBXhQiW3dsoQR18MbM",
- "9zJ4+NJTHSrg6jpgOygQ2D1j6SIl6HaVsUbBp3K4rSIfB6Moc96uBRYKhHAooX0x9z6h6nSyXbQ6B55/",
- "D5ufbVuczuR6Ormd6TRGawdxB63f1ssbpTO65smU1vKE7ElyXhSluuR54gzMQ6xZqkvHmtjc26M/s6iL",
- "mzHPvzl5/dahfz2dpDnwMqlVhcFZYbviTzMrKmg2sEF8sWh75/M6O6mSweLXVZhCo/TVElzV3UAb7ZUH",
- "bBwOwVZ0Rup5PEJop8nZ+UZoilt8JFDULpLGfEcekrZXhF9ykXu7mcd2IJoHJzeuxmRUKoQAbu1dCZxk",
- "yZ2Km97uju+Ohrt2yKRwrC11gVdU+lozJbsudAwv3hTO677iWNyPrCJ94SSrFVoSEp2LNG5jlTNtmUOS",
- "78w2Zth4QBm1ECsx4IqVlQhg2WZ6xEW3g2QwRpSYvlDkEO1myj1rUknxWwVMZCCN/VTiruxsVKym6Kzt",
- "/ePU6g79sRxgstA34G+jY4SFLbsnHiKxXcEIPXU9dF/VV2Y/0doiheHWjUtiD4d/OGLvSNzirHf84biZ",
- "gheXbY9b+ApJX/5ZxqBy1LufQPGXV1dhc2CM6JMmQifzUv0O8XseXo8jqTi+lKfAKJffQY6IOW+sO83L",
- "LM3og8s9pN2EVqh2kMIA1+PKB245rCnoLdRc0lLTCwOtWLc4w4RRpYcEv2EYh3MvEjfnVzMeK7holQyL",
- "00njAG7Z0o1ivrOnva4TG2h0FviS67aCsqwLKJssuX7FlhsqDDTsaFWh0QyQa0OdYEr+v1yrCJhKXnFJ",
- "D1XYfrSVXG8NZPyyva5UiTUSdNzsn0EqVjyPaw5Z2jfxZmIh6A2GSkNQ5N8BovdtiIvcQwl1uo4jzemc",
- "HU2Dl0bcamTiUmgxywFbPKIWM65RkteGqLqLnR5Is9TY/PGI5stKZiVkZqmJsFqxWqnD603tvJqBuQKQ",
- "7AjbPXrB7qPbTotLeGCp6M7nyfGjF2h0pT+OYgeAe0NjmzTJUJz8w4mTOB+j35JgWMHtoB5E08npEa1h",
- "wbVlN1HXMXsJWzpZt3svrbjkC4hHiqx24ER9cTXRkNahi8zoBRhtSrVhwsTHB8OtfBqIPrfij9BgqVqt",
- "hFk5545WK8tPTQV/GtSDo+dkXPFVj5f/iD7SwruIOpfIz2s0pfMtNmv0ZL/hK2iTdco4FcbIRRO94EtC",
- "s1Nfdwer0dZFaIk2diw7dVRzMJhhzopSSIMXi8rMk7+xdMlLnlrxdzCEbjJ7/jRSgbddCVLuh/hnp3sJ",
- "GsrLOOnLAbb3OoTry+5LJZOVlSjZgybbI9iVg87cuNtuyHe4HfRYpcxCSQbZrWqxGw8k9a0YT24BeEtW",
- "rOezFz/uPbPPzplVGWcPXtkV+unda6dlrFQZK6bXbHencZRgSgGXGLsXXyQL85ZrUeajVuE22H9Zz4NX",
- "OQO1zO/l2EXgaxW5nfqq0LUl3cWqR6wDQ9vUfrBsMHOgpqxdgffzO/288bnvfLJfPK74RxfZL7ykSGQ/",
- "g4FFDKqDR5czq78H/m/OvlbrsYva2SF+Yf8FSBMlSSXy7OcmK7NTfL3kMl1G/Vkz2/GX5pmoenJ0PkVr",
- "1i25lJBHwZEu+IvXGSNa7T/V2HFWQo5s260HT9PtTK5BvI2mR8oPaMkrTG4HCKnaTnirA6rzhcoYjtMU",
- "SGukZ/8dgaDa828VaBNLHsIPFNSFdkt736ViwwxkhrfFA/YdvQS7BNYqf4O3tLqKgCt9Swb1qsgVz6ZY",
- "yOH8m5PXjEalPvTYCRU7XuAlpT2Ljr0qqP04LjzYv1sST10YD2d7LLWdtTZYjUobvipiyaG2xblvgBmo",
- "oQ0fry8hdQ7Yq+BNR8ojtSAsP8xFubI3rhoa6S7IE/Y/xvB0iVeylkgdZvnxVbo9V+rgZbz6hZu6ICLu",
- "O4u3K9RNdbqnTNl785XQ9AAoXEI7H7VOznYmAZ+f2p5eWUlJnBLVPbYVD7gJ2T1yFKjhzfxRzDqE31Mh",
- "pyL3+xYtP8Ne0QJN3QrovSfxKLuxfrnEP+yccqmkSLE8Uuxodi+FjvGBjagk1TWy+i3udmhkc0Xrrtdh",
- "co6Kg5XYvSB0hOsb4YOvdlGJO+hPg09SLrlhCzDaSTbIpv75AGcHFFKDK3CJ78oGclKVLb8iSsioqzqp",
- "XRp7shGmxQxc7L613964az/Gi18IiQq+I5sLTSdLHT5kaOytQBi2UKDdfNq5wfq97XOAabIZrD8e+IcP",
- "qRoMuuXstMkH3Qd14j3SzgNs2760bV2doPrnVgQyDXpSFG7Q4cclovqAWctBAkc8i4l37QTEreGH0Law",
- "29ZQEjxPLaPBJTqiocBzuMcY9UMLnUd8rNJKHIUtGIVwRSsYCBlB47WQ0DzLGTkg0uiRgAuD+3Wgn05L",
- "bkgFHCXTzoHn6H2OCTRtnOvhtqC6tYQsSXCOfozhZWzeiBgQHHWDRnHjclO/Bmq5O1AmXuIzxI6Q/Rcf",
- "UKtySlSGGQWdNyBigsMKbv/KTPsA6G+Dvk5E3U3JaefscxINJYnOqmwBJuFZFqtI9TV+ZfjVF5eCNaRV",
- "XZiyKFiKNVHaRWL63OYGSpXU1WrLWL7BLYcLHlWJcEP4sItfYUxCmW3w31hVxuGVcUEYe4cB+ogL9wrF",
- "nnpzG1JP67U8nWixSMZTAs+U25OjGfpmjN70v1NOz9WijchnLg2xTcqFaxSTb9/YgyOsnNArNUpHS13Y",
- "AIPulH8KD6+NdUpuWyrhUdarPYrOnvqpre0GiOFHs6Z4+A2E3gYFMTidr+Q9HArATQfjxblxmWuGs60i",
- "aDAbiKJ3KO8HsYhbTocidihgx37u9R6nGfb0bIS9laA+FKyP0Pc+zpQVXDjXeCMs+pR1EenD5sJtm65Z",
- "4O4kXJz3oMXu+8uhmGymhVzkwPB795mhC3Dp7PU78zRXH5Xkr4T0q3vmleDVUfHR+fejE3CoL2sGHTTa",
- "nruS9jRNdyf//meKYWMgTbn5FzDh9ha990hTX9sl81TThNXlkEeVR26divH3lobrHzU1j5CfCqVFU4I7",
- "9hDTyFi3c3xLKajf1IflA00uITVYd71xoJcA+1RzsoMFj/z9uw7SwN2xDgl05Y+21TzqF1vfcaD10pKC",
- "1DoqVH0wvsLPSR0mhUIJK+AuQLp39toJB6PDnudzSI243JEG9o8lyCDFaOqNEPRebpAVJuowWqwisr+J",
- "rUFoW5bWVnyCan63RmcoCeQCNvc0a3FDtHL21J8rNykggRRA6ZBYFlE6FoZAVlPnGRa65gykgg/7oe7Q",
- "lOIafHMnSGq84VieJe2J2yQ6bhky/ujHqLFs173SfzEidChTrP9owLCy/QrfaND1e3i+AEV4JWWn/TJ9",
- "V66ABSbt1Y4CX8oCtP/NZ+jSKLm4gPBVIHTLXPEy8y2idgZvwki2nEe99C5f8L6L9LweWTRBmv2Enkjh",
- "JwzFTXNl9a9kKJ65HRcZPp6P0R9U8hsjPi1ecyjd62mo7OVKQ2KUD+rchsc2UriH3m9CBD1YbJGQGyyB",
- "8q6p8YJFZzmWPOEusiWcICthxS12ZVCJZXjMbcR+Sd99BosvOrrTnFLz6+5C8z48V+geEUOunzN3Wu7O",
- "jLmJZUVISW+16lhZFmlJGZr+i1JlVUoHdLgxauvT6KJHW0RJ1CiR9mfZu1/mWALsdZBneAGbQ1L9fal+",
- "v5Qh9qRC0RyCvP7Oat+p0Sl+v84XNIHFneD5JQ0300mhVJ4M2PpP+9VlunvgQqQXkDF7dvjAtoFnS9h9",
- "NDHXztyr5cZXUykKkJA9OGDsRFIosffrtssbdwaX98y28dc4alZRwSdnUzr4IOMxmViKqbylfPNgtks1",
- "DVb43XIoArKjdsl6oLJNya8ij/gcjL2U9j2t3YdVGqYiLGJayo4nLCJeZP8mgn9hw2esGLUSaf8VhZ4q",
- "McfXqBIeAX5aC/Bp661A0Xm4w9cYomcaUk4KnL08cJFXJbjMAXo2p1NOv+Bm6ZfPNu+rWfbIBo1h/VSS",
- "nWu6FPjLiXuzp7svVJHkcAktR4JLZ6jSFLQWlxC+90OdWQZQ4FW9e4DELOQhX3VkiJt7EthYx1A3KlSI",
- "sLRSbIfEGHiMPSH20GNZyGJ0KbKKt+inb/EUy8i33UNcR+6QvTdHfHK9reGeS0nqYm4xQ6ZLJ/FLaPm3",
- "edqloyAFT7DUMAfeoqypcBtlZJC0ccrerIjGKH7o27QjWyZ4dmW75SWssdME75bkGsGbmt913SX9odmN",
- "4x6A8R12oBca5IInYLwm5ND5whG2P9RECaYyyAmt6e+y8bkJNuIrWCKS3XaaVPGMorPa6xIYcPXL2i46",
- "9C5T13yKBXWUxCJjfbOrRlcZ1ioPGcfK7vKS55/fdIqVlk6QHu6d2/hEQ9tbSGQipb5ZmNtrPmrswM52",
- "d0PLt2jq/QfYNYr6OB0o5/OodQXvGUKRyXOWq+aFOwTJrhAmOUUfPWczl6JTlJAKLTrZi1e+jHJtasJX",
- "BZrnj7fbtnbN82dlbsHGc6++sDdNSVaj8MRoMGy26BcWKgM7N8rlMe7rsUWEfjEZFdbK2HFcXLS8pVTi",
- "uhMGqEq4Y69pEP+0p9e0XwVk7PTIM2gPnUpDf56jT+sWbSMHdTO3sS7/PnG31e0c46mPl+O13TFUgAiC",
- "tawZosp+ffQrK2GOj9Uo9vAhDvDw4dQ1/fVx+7Pdzg8fxp9Z/lxBAkQjB8ONG+OYn4fCxik0eiBDobMe",
- "lcizXYzRyjdpnnvCjIpfXMbZF3lw6hfy5fS3qnv0Y5/wpO4iIGEic20NHgwVZJKMSCJx3SIpI2gVSatS",
- "mA0WwvGmf/FLNJzhu9pb6LzNdekEd/YZdQF1KaXGt1hpf7p+p3iO55HVqTE4zODTut+s+arIwW2Ur+7N",
- "/gpP/vY0O3ry6K+zvx09O0rh6bMXR0f8xVP+6MWTR/D4b8+eHsGj+fMXs8fZ46ePZ08fP33+7EX65Omj",
- "2dPnL/56z8ohizIhOvFp15P/i6+yJSdvT5Nzi2xDE16I+kVty8b+aRme4k6EFRf55Nj/9L/9DjtI1aoB",
- "73+duKzOydKYQh8fHl5dXR2EXQ4X6ExIjKrS5aEfp/+S8dvTOjOHrpa4opR04U0GnhVO8Nu7b87O2cnb",
- "04PgpczjydHB0cEjfEixAMkLMTmePMGfcPcscd0PHbNNjj9dTyeHS+A5+t7tHyswpUj9J33FFwsoD9wb",
- "O/any8eHXpU4/OQcKdfbvh2G5aoPP7X8TdmOnljO9vCTr9KyvXWrDIrzswUdRmKxrdnhDJM/xzYFHTQe",
- "ngpeMPThJ1SRB38/dBlx8Y94VaE9cOidsvGWLSp9MmuLa6eHe5L/8BP+B3nymoREDjEXLCWScdY0nzJh",
- "GJ+pEsujmHRp5YKvyyB00HKCnEpMfppZ5ra9XhIGvgITlaQ8ft83mSAg5iGhJLBs3mzU1kiNLDZlBWGV",
- "xPqkabVvzpv3R8mLj58eTR8dXf/Fnifuz2dPrkfGUrys4bKz+rAY2fAjFjVAQwzu38dHR7d47vNEBuSn",
- "RQpele3VCaKVGLY9uqXqAGI1MXYkX3fAx94Pu55Onu454632o1Z0bOQdsK95xnxuJY796PONfSoxksXK",
- "dUbn1vV08uxzzv5UWpbnOcOWQTWd/tL/JC+kupK+pVUyqtWKlxu/jXVLKDC32HiU8YVGT0YpLjnqdlLJ",
- "1hMhk4/oPYvltw7IG234DeTNme31b3nzueQNLtJdyJs2oDuWN4/33PN//hn/W8L+2STsGYm7W0lYp/BR",
- "SlFfA4V1AaVYgaTCRu5XKglwSK+W93/eyDT6Yx989/Ws2M+Hn9rV21uas15WJlNXVK8ielRgiVKeu3pm",
- "aBqtr1lGMQ+gCbNlP7o0mHyD9mCRAeOYn68q09yDbefaz1t7KiyE5mW9hZA4AJqccRQq3MeDADYNqZL0",
- "DlXnWHKYvVEZ9I8lPHh+q6DcNCePw3Eybcklx1iRMnm3FvN9MXK9H9uhaZz8On3mqB+fav19eMWFsYeX",
- "i3dFivY7G+D5ocvk7vzaJE/1vmBGWPBj6KyO/npYV5qNfuxeQWNf3RVsoJGvw+E/Nyao0KSDLFEbc95/",
- "tCuLdcwctzQWiuPDQ4whWyptDifX008d60X48WO9mL7ATb2o1x+v/ycAAP//m8e6KFm+AAA=",
+ "H4sIAAAAAAAC/+x9a3PcNrLoX0HNnio/7lAjP3etqtS5ip1kdeM4LkvJ3nNs3wRD9sxgRQIMAI5m4qv/",
+ "fgoNgARJcIYjKfamaj/ZGuLRaDQa/UL3p0kqilJw4FpNTj5NSippARok/kXTVFRcJywzf2WgUslKzQSf",
+ "nPhvRGnJ+HIynTDza0n1ajKdcFpA08b0n04k/FYxCdnkRMsKphOVrqCgZmC9LU3reqRNshSJG+LUDnH2",
+ "anK94wPNMglK9aH8kedbwniaVxkQLSlXNDWfFLliekX0iiniOhPGieBAxILoVasxWTDIM3XkF/lbBXIb",
+ "rNJNPryk6wbERIoc+nC+FMWccfBQQQ1UvSFEC5LBAhutqCZmBgOrb6gFUUBluiILIfeAaoEI4QVeFZOT",
+ "9xMFPAOJu5UCW+N/FxLgd0g0lUvQk4/T2OIWGmSiWRFZ2pnDvgRV5VoRbItrXLI1cGJ6HZEfKqXJHAjl",
+ "5N23L8mTJ09emIUUVGvIHJENrqqZPVyT7T45mWRUg//cpzWaL4WkPEvq9u++fYnzn7sFjm1FlYL4YTk1",
+ "X8jZq6EF+I4REmJcwxL3oUX9pkfkUDQ/z2EhJIzcE9v4TjclnP+L7kpKdboqBeM6si8EvxL7OcrDgu67",
+ "eFgNQKt9aTAlzaDvj5MXHz89mj46vv7L+9Pkv92fz55cj1z+y3rcPRiINkwrKYGn22QpgeJpWVHex8c7",
+ "Rw9qJao8Iyu6xs2nBbJ615eYvpZ1rmleGTphqRSn+VIoQh0ZZbCgVa6Jn5hUPDdsyozmqJ0wRUop1iyD",
+ "bGq479WKpSuSUmWHwHbkiuW5ocFKQTZEa/HV7ThM1yFKDFw3wgcu6F8XGc269mACNsgNkjQXChIt9lxP",
+ "/sahPCPhhdLcVeqwy4pcrIDg5OaDvWwRd9zQdJ5vicZ9zQhVhBJ/NU0JW5CtqMgVbk7OLrG/W43BWkEM",
+ "0nBzWveoObxD6OshI4K8uRA5UI7I8+eujzK+YMtKgiJXK9Ard+dJUKXgCoiY/xNSbbb9/5z/+IYISX4A",
+ "pegS3tL0kgBPRQbZETlbEC50QBqOlhCHpufQOhxcsUv+n0oYmijUsqTpZfxGz1nBIqv6gW5YURWEV8Uc",
+ "pNlSf4VoQSToSvIhgOyIe0ixoJv+pBey4inufzNtS5Yz1MZUmdMtIqygm6+Opw4cRWiekxJ4xviS6A0f",
+ "lOPM3PvBS6SoeDZCzNFmT4OLVZWQsgWDjNSj7IDETbMPHsYPg6cRvgJw/CCD4NSz7AGHwyZCM+Z0my+k",
+ "pEsISOaI/OSYG37V4hJ4TehkvsVPpYQ1E5WqOw3AiFPvlsC50JCUEhYsQmPnDh2Gwdg2jgMXTgZKBdeU",
+ "ccgMc0aghQbLrAZhCibcre/0b/E5VfD86dAd33wdufsL0d31nTs+arexUWKPZOTqNF/dgY1LVq3+I/TD",
+ "cG7Flon9ubeRbHlhbpsFy/Em+qfZP4+GSiETaCHC302KLTnVlYSTD/yh+Ysk5FxTnlGZmV8K+9MPVa7Z",
+ "OVuan3L702uxZOk5Ww4gs4Y1qnBht8L+Y8aLs2O9ieoVr4W4rMpwQWlLcZ1vydmroU22Yx5KmKe1thsq",
+ "Hhcbr4wc2kNv6o0cAHIQdyU1DS9hK8FAS9MF/rNZID3Rhfzd/FOWuemty0UMtYaO3ZWM5gNnVjgty5yl",
+ "1CDxnftsvhomAFaRoE2LGV6oJ58CEEspSpCa2UFpWSa5SGmeKE01jvQfEhaTk8lfZo39ZWa7q1kw+WvT",
+ "6xw7GZHVikEJLcsDxnhrRB+1g1kYBo2fkE1YtodCE+N2Ew0pMcOCc1hTro8alaXFD+oD/N7N1ODbSjsW",
+ "3x0VbBDhxDacg7ISsG14T5EA9QTRShCtKJAuczGvf7h/WpYNBvH7aVlafKD0CAwFM9gwpdUDXD5tTlI4",
+ "z9mrI/JdODaK4oLnW3M5WFHD3A0Ld2u5W6y2Lbk1NCPeUwS3U8gjszUeDUbMvwuKQ7ViJXIj9eylFdP4",
+ "765tSGbm91Gd/xwkFuJ2mLhQ0XKYszoO/hIoN/c7lNMnHGfuOSKn3b43IxszSpxgbkQrO/fTjrsDjzUK",
+ "ryQtLYDui71LGUclzTaysN6Sm45kdFGYgzMc0BpCdeOztvc8RCFBUujA8HUu0su/U7W6gzM/92P1jx9O",
+ "Q1ZAM5BkRdXqaBKTMsLj1Yw25oiZhqjgk3kw1VG9xLta3p6lZVTTYGkO3rhYYlGP/ZDpgYzoLj/if2hO",
+ "zGdztg3rt8MekQtkYMoeZ+dkyIy2bxUEO5NpgFYIQQqr4BOjdR8E5ctm8vg+jdqjb6xNwe2QWwTukNjc",
+ "+TH4WmxiMHwtNr0jIDag7oI+zDgoRmoo1Aj4XjnIBO6/Qx+Vkm77SMaxxyDZLNCIrgpPAw9vfDNLY5w9",
+ "nQt5M+7TYSucNCZnQs2oAfOddpCETasycaQYMVvZBp2BGi/fbqbRHT6GsRYWzjX9A7CgzKh3gYX2QHeN",
+ "BVGULIc7IP1VlOnPqYInj8n530+fPXr8y+Nnzw1JllIsJS3IfKtBkftONyNKb3N40F8ZakdVruOjP3/q",
+ "DZXtcWPjKFHJFApa9oeyBlArAtlmxLTrY62NZlx1DeCYw3kBhpNbtBNr2zegvWLKSFjF/E42YwhhWTNL",
+ "RhwkGewlpkOX10yzDZcot7K6C1UWpBQyYl/DI6ZFKvJkDVIxEfGmvHUtiGvhxduy+7uFllxRRczcaPqt",
+ "OAoUEcrSGz6e79uhLza8wc1Ozm/XG1mdm3fMvrSR7y2JipQgE73hJIN5tWxpQgspCkJJhh3xjv4ONIoC",
+ "F6yAc02L8sfF4m5URYEDRVQ2VoAyMxHbwsj1ClLBbSTEHu3MjToGPV3EeBOdHgbAYeR8y1O0M97FsR1W",
+ "XAvG0emhtjwNtFgDYw7ZskWWt9dWh9Bhp7qnIuAYdLzGz2joeAW5pt8KedFYAr+ToirvXMjrzjl2OdQt",
+ "xplSMtPX69CML/N29M3SwH4UW+MXWdBLf3zdGhB6pMjXbLnSgVrxVgqxuHsYY7PEAMUPVinLTZ++avZG",
+ "ZIaZ6ErdgQjWDNZwOEO3IV+jc1FpQgkXGeDmVyounA3Ea6CjGP3bOpT39MrqWXMw1JXSyqy2Kgl6b3v3",
+ "RdMxoak9oQmiRg34rmqno21lp7OxALkEmm3JHIATMXcOIue6wkVSdD1rL9440TDCL1pwlVKkoBRkiTNM",
+ "7QXNt7NXh96BJwQcAa5nIUqQBZW3BvZyvRfOS9gmGCihyP3vf1YPvgC8Wmia70Estomht1bznRewD/W4",
+ "6XcRXHfykOyoBOLvFaIFSrM5aBhC4UE4Gdy/LkS9Xbw9WtYg0R/3h1K8n+R2BFSD+gfT+22hrcqB8D+n",
+ "3hoJz2wYp1x4wSo2WE6VTvaxZdOopYObFQScMMaJceABwes1Vdr6kBnP0PRlrxOcxwphZophgAfVEDPy",
+ "z14D6Y+dmnuQq0rV6oiqylJIDVlsDRw2O+Z6A5t6LrEIxq51Hi1IpWDfyENYCsZ3yLIrsQiiuna1uCCL",
+ "/uLQIWHu+W0UlS0gGkTsAuTctwqwG4ZADQDCVINoSzhMdSinjruaTpQWZWm4hU4qXvcbQtO5bX2qf2ra",
+ "9omL6ubezgQojLxy7R3kVxazNvhtRRVxcJCCXhrZA80g1tndh9kcxkQxnkKyi/JRxTOtwiOw95BW5VLS",
+ "DJIMcrrtD/qT/Uzs510D4I436q7QkNgopvimN5Tsg0Z2DC1wPBUTHgl+Iak5gkYVaAjE9d4zcgY4dow5",
+ "OTq6Vw+Fc0W3yI+Hy7ZbHRkRb8O10GbHHT0gyI6jjwF4AA/10DdHBXZOGt2zO8V/gXIT1HLE4ZNsQQ0t",
+ "oRn/oAUM2FBdgHhwXjrsvcOBo2xzkI3t4SNDR3bAoPuWSs1SVqKu8z1s71z1604QdTOSDDRlOWQk+GDV",
+ "wDLsT2z8TXfMm6mCo2xvffB7xrfIcnKmUORpA38JW9S539rAzsDUcRe6bGRUcz9RThBQHy5mRPCwCWxo",
+ "qvOtEdT0CrbkCiQQVc0LprUN2G6rulqUSThA1K+xY0bnxLNBkX4HxngVz3GoYHn9rZhOrE6wG76LjmLQ",
+ "QofTBUoh8hEWsh4yohCMivcgpTC7zlzsuI8e9pTUAtIxbfTg1tf/PdVCM66A/JeoSEo5qlyVhlqmERIF",
+ "BRQgzQxGBKvndJEdDYYghwKsJolfHj7sLvzhQ7fnTJEFXPkHF6ZhFx0PH6Id561QunW47sAeao7bWeT6",
+ "QIePuficFtLlKfsjC9zIY3bybWfw2ktkzpRSjnDN8m/NADonczNm7SGNjIuqwHFH+XKCoWPrxn0/Z0WV",
+ "U30XXitY0zwRa5CSZbCXk7uJmeDfrGn+Y91tj07XRIGxooCMUQ35lpQSUrDR+UZUU/XYR8TG7aUrypco",
+ "oUtRLV3gmB0HOWylrC1EVrw3RFSK0RueoFU5xnFdsLB/oGHkF6BGh+qapK3GcEXr+dybnDFXod+5iIk+",
+ "6pWaTgZVTIPUdaNiWuS0X5mM4L4tASvATzPxSN8Fos4IG318hdtiqNds7h9jI2+GjkHZnzgIZWs+DkWz",
+ "Gf02396BlGEHIhJKCQrvhNAupOxXsQhflLlLQ22VhqJvOrddfxk4fu8GFTTBc8YhKQSHbfQRNePwA36M",
+ "Hie8lwY6o4Qw1Lcr9Lfg74DVnmcMNd4Wv7jb3RPadRGpb4W8Kx+kHXC0PD3C5bfXv+2mvKljkuZ5xJfn",
+ "3pt0GYCa1u/bmSRUKZEyFJLOMjW1B825/9zjlDb639ZRtHdw9rrjdpxW4VNGNMpCXhJK0pyhyVZwpWWV",
+ "6g+colEoWGok2shrv8Nmwpe+SdwuGTEbuqE+cIqRZrWpKBohsYCIXeRbAG8tVNVyCUp3lIsFwAfuWjFO",
+ "Ks40zlWY45LY81KCxJCfI9uyoFuyMDShBfkdpCDzSrfFbXxOpTTLc+dBM9MQsfjAqSY5UKXJD4xfbHA4",
+ "72X3R5aDvhLyssZC/HZfAgfFVBKPivrOfsWAVbf8lQtexefv9rP1uZjxmzdXW7QZNU+6/9/9/zx5f5r8",
+ "N01+P05e/K/Zx09Prx887P34+Pqrr/5/+6cn1189+M//iO2Uhz322MdBfvbKqaJnr1DfaJwuPdg/m8G9",
+ "YDyJElkYPtGhLXIfH7Y6AnrQtkbpFXzgesMNIa1pzjLDW25CDt0bpncW7enoUE1rIzrWJ7/WA6X4W3AZ",
+ "EmEyHdZ4YymqH0gYf1aHXkD3Ug7Py6Lidiu99G1fjfiALrGY1k8nbVaVE4Lv6lbURyO6Px8/ez6ZNu/h",
+ "6u+T6cR9/RihZJZtYq8eM9jElDN3QPBg3FOkpFsFOs49EPZo7JoNpgiHLcBo9WrFys/PKZRm8ziH87H4",
+ "zsiz4WfcBsmb84M+xa1zVYjF54dbS4AMSr2KZVtoCWrYqtlNgE6cRynFGviUsCM46hpZMqMvuii6HOgC",
+ "X/2j9inGaEP1ObCE5qkiwHq4kFGWjBj9oMjjuPX1dOIuf3Xn6pAbOAZXd87agej/1oLc++6bCzJzDFPd",
+ "sw9w7dDBk8mIKu1eBbUigAw3szlmrJD3gX/gr2DBODPfTz7wjGo6m1PFUjWrFMivaU55CkdLQU78Q6NX",
+ "VNMPvCdpDaaBCp54kbKa5ywll6FC0pCnTe3RH+HDh/c0X4oPHz72giH66oObKspf7ASJEYRFpROXmCCR",
+ "cEVlzNmk6ofpOLLNPLJrVitki8paJH3iAzd+nOfRslTdB6r95ZdlbpYfkKFyzy/NlhGlhfSyiBFQLDS4",
+ "v2+EuxgkvfJ2lUqBIr8WtHzPuP5Ikg/V8fETIK0Xm7+6K9/Q5LaE0daVwQe0XaMKLtyqlbDRkiYlXcZ8",
+ "Wh8+vNdAS9x9lJcLtHHkOcFurZeiPhIeh2oW4PExvAEWjoNfveHizm0vn4QqvgT8hFuIbYy40Xjab7pf",
+ "wdvRG29X5/1pb5cqvUrM2Y6uShkS9ztT56ZZGiHLhz8otkRt1aXxmQNJV5BeuvwqUJR6O2119xE2TtD0",
+ "rIMpm3nHvvzC3A/oEZgDqcqMOlGc8m33Eb4CrX0c7zu4hO2FaFJHHPLqvv0IXA0dVKTUQLo0xBoeWzdG",
+ "d/NdGBcq9mXp31LjozpPFic1Xfg+wwfZirx3cIhjRNF6pDyECCojiLDEP4CCGyzUjHcr0o8tz2gZc3vz",
+ "RbLweN5PXJNGeXIRV+Fq0OpuvxeAabzElSJzauR24TJQ2YfOARerFF3CgIQcOmVGPiduOXJwkH33XvSm",
+ "E4vuhda7b6Ig28aJWXOUUsB8MaSCykwnzs7PZP1+zjOBiSUdwuY5ikl1QKJlOlS2nGM2U94QaHECBskb",
+ "gcOD0cZIKNmsqPLJsTCHmD/Lo2SAP/Dh/q50LWdBiFiQKKxOxuJ5bvec9rRLl7TFZ2rx6VlC1XJEqhUj",
+ "4WNUemw7BEcBKIMclnbhtrEnlCaJQLNBBo4fF4uccSBJLNosMIMG14ybA4x8/JAQa4Eno0eIkXEANvqz",
+ "cWDyRoRnky8PAZK7JAjUj42e8OBviL/XsvHXRuQRpWHhbMCrlXoOQF2IYn1/dQJlcRjC+JQYNremuWFz",
+ "TuNrBullDUGxtZMjxEVUPBgSZ3c4QOzFctCa7FV0k9WEMpMHOi7Q7YB4LjaJfbAZlXjnm7mh92hIOj4f",
+ "jR1Mm5/lniJzscEoHbxabAj0HliG4fBgBBr+himkV+w3dJtbYHZNu1uailGhQpJx5ryaXIbEiTFTD0gw",
+ "Q+RyP0i5ciMAOsaOJn+xU373Kqlt8aR/mTe32rRJJeZf+8SO/9ARiu7SAP76Vpg6ScrbrsQStVO0g03a",
+ "+WECETJG9IZN9J00fVeQghxQKUhaQlRyGfOcGt0G8MY5990C4wVmoaF8+yCIYJKwZEpDY0T3cRJfwjxJ",
+ "MfmdEIvh1elSLsz63glRX1PWjYgdW8v87CvAEOAFk0on6IGILsE0+lahUv2taRqXldoxUjZVLMvivAGn",
+ "vYRtkrG8itOrm/f7V2baNzVLVNUc+S3jNmBljqmNo5GTO6a2wbU7F/zaLvg1vbP1jjsNpqmZWBpyac/x",
+ "JzkXHc67ix1ECDBGHP1dG0TpDgYZvHjtc8dAbgp8/Ee7rK+9w5T5sfdG7fh3t0N3lB0pupbAYLBzFQzd",
+ "REYsYTrIDNx/ijpwBmhZsmzTsYXaUQc1ZnqQwcPnU+tgAXfXDbYHA4HdM/YaRoJqp85rBHyb47mVueZo",
+ "FGYu2gnuQoYQTsWUr1DQR1T9Wm4fri6A5t/D9mfTFpczuZ5Obmc6jeHajbgH12/r7Y3iGV3z1pTW8oQc",
+ "iHJallKsaZ44A/MQaUqxdqSJzb09+jOzurgZ8+Kb09dvHfjX00maA5VJLSoMrgrblX+aVdksfQMHxGdA",
+ "Nzqfl9mtKBlsfp1aLDRKX63ApZIOpNFezsvG4RAcRWekXsQjhPaanJ1vxC5xh48EytpF0pjvrIek7RWh",
+ "a8pybzfz0A5E8+DixiVOjXKFcIBbe1cCJ1lyp+ymd7rjp6Ohrj08KZxrR7LrwuZzV0TwrgsdY563pfO6",
+ "FxQzVlqrSJ858apAS0KicpbGbax8rgxxcOs7M40JNh4QRs2IFRtwxfKKBWOZZmNy0nSADOaIIlNF0+I0",
+ "uJsLV6un4uy3CgjLgGvzSeKp7BxUTG/irO3969TIDv253MDWQt8MfxsZI8zW2r3xEIjdAkboqeuB+6pW",
+ "mf1Ca4uU+SFwSRzg8A9n7F2JO5z1jj4cNdvgxVXb4xaW1unzP0MYNsf6/ro+Xnl1aWMH5ojW6WEqWUjx",
+ "O8T1PFSPIw+NfH5ahlEuv0P40CGsTtFiMbV1pyk31Mw+uN1D0k1ohWoHKQxQPe584JbDRJneQk253Wpb",
+ "NqMV6xYnmDCqdGbHbwjGwdyLxM3p1ZzGsogaIcPAdNo4gFu2dC2I7+xxr+rXFnZ2EviS67bMPiIvQTZv",
+ "APsJaW4oMNhpR4sKjWSAVBvKBFPr/8uViAxT8SvKbfUV088eJddbgTV+mV5XQmIKCBU3+2eQsoLmcckh",
+ "S/sm3owtmS0sUikIKle4gWzRJktFrvpH/YbIoeZsQY6nQfkctxsZWzPF5jlgi0e2xZwq5OS1IaruYpYH",
+ "XK8UNn88ovmq4pmETK+URawSpBbqUL2pnVdz0FcAnBxju0cvyH102ym2hgcGi+5+npw8eoFGV/vHcewC",
+ "cIVhdnGTDNnJPxw7idMx+i3tGIZxu1GPoq/lbWW4Yca14zTZrmPOErZ0vG7/WSoop0uIR4oUe2CyfXE3",
+ "0ZDWwQvPbFkjpaXYEqbj84Omhj8NRJ8b9mfBIKkoCqYL59xRojD01JSlsJP64WyNJJdR2MPlP6KPtPQu",
+ "oo4S+XmNpvZ+i60aPdlvaAFttE4JtXk/ctZEL/g85+TMpxXCFMt1ZmWLGzOXWTqKORjMsCClZFyjYlHp",
+ "RfI3kq6opKlhf0dD4Cbz508jaaXb6U35YYB/drxLUCDXcdTLAbL3MoTrS+5zwZPCcJTsQfPaIziVg87c",
+ "uNtuyHe4e+ixQpkZJRkkt6pFbjTg1LciPL5jwFuSYr2eg+jx4JV9dsqsZJw8aGV26Kd3r52UUQgZyxXY",
+ "HHcncUjQksEaY/fim2TGvOVeyHzULtwG+i/refAiZyCW+bMcUwS+FhHt1Kc6ry3pLlY9Yh0YOqbmgyGD",
+ "uRtqStpppT8/H72bKKi4p8sbtvuOLfPF4wH/6CLiC5MLbmDjy7crGSCUIK1+lGSy+nvgY6fka7EZSzid",
+ "U+iJ518ARVGUVCzPfm5efnaqFkjK01XUZzY3HX9p6qvVi7N3YDTt34pyDnl0OCtv/uLl0ojk/E8xdp6C",
+ "8ZFtu4UU7HI7i2sAb4PpgfITGvQynZsJQqy2H9XVQdv5UmQE52lyzDXHtV+AI0iT/lsFSsceKOEHGziG",
+ "tlHDDmyWbgI8Q430iHxnSyivgLQSCKEm6DNFtF9NV2UuaDbFDBYX35y+JnZW28dWCbJZwpeoCLVX0bGJ",
+ "Bekzx4Ug+4I/8ecR48fZHa9tVq10Uif1jj1ANS2atOOs4ydAFSnEzhF5FRRDtW9VzRCGHhZMFkarq0ez",
+ "8hHShPmP1jRdodrXYq3DJD8+vb2nShWUlKxLQ9U5JfHcGbhdhnub4H5KhNHNr5iylXNhDe03r/UDcGd2",
+ "8G9g28uTFeeWUo4OuOXqDJKHot0DZ69I70qIQtZB/IFCv60OcWi2/3PsFU1x1S0d0KslaV9Q1iV/fEX0",
+ "lHLBWYoJpmJXtCuxO8bPNiIXV9eQ64+4O6GRwxUtWFCH4jksDpYw8IzQIa5v6A++mk211GH/1FjLdUU1",
+ "WYJWjrNBNvV1N5ytkXEFLkcoFmQO+KSQLd8lcsioOzyp3SYHkhE+vRlQHr8139440wLGpF8yjkqEQ5sT",
+ "/Kw1ECuAaqN5ME2WApRbT/v9sXpv+hzhU9wMNh+PfMVQHMO6/syyrZ+7P9Sp93o7L7Np+9K0dQmS6p9b",
+ "Uc520tOydJMOV2WJygN6wwcRHPFeJt59FCC3Hj8cbQe57QxXwfvUEBqs0dkNJd7DPcKoK5R0ql8ZodVS",
+ "FLYgNkwsmiWB8QgYrxmHpp5t5IJIo1cCbgye14F+KpVUWxFwFE+7AJqjhzvG0JR27o3bDtVND2VQgmv0",
+ "cwxvY1NcZYBx1A0awY3ybV1G11B3IEy8xPrdDpH9UikoVTkhKsNXC53iKTHGYRi3L8/UvgD6x6AvE9nu",
+ "WlJ7cg65iYYeos6rbAk6oVkWS9n6NX4l+JVkFUoOsIG0qlN7liVJMe9KOxFNn9rcRKngqip2zOUb3HK6",
+ "oBpRhBrCikh+h/Ghy3yL/8byWg7vjAv0ODjU0Ed1ZIdlX+qHTsakXkPTiWLLZDwm8E65PTqaqW9G6E3/",
+ "O6X0XCzbgHzm9BO7uFy4RzH+9o25OMLsDL1krfZqqZMnYGCf8DUkUW2sn/22uRJeZb3srehQqmvU7TZA",
+ "DFebm+LlNxDeGyTdoPZ+tR7KoSDfdDAmnWr3Ok5TspMFDb44shFC9m0RQhG3zg5FBdmgIPO513ucZNiT",
+ "s3U88WGAUB9u1gfoex/LSkrKnPu9YRZ9zLqo9/47hDHxsM0GdxfhYskHLXbfr4fivn0yNvzerUZ1Ce7J",
+ "fClhzUTlHds+8smrhPbXVm2nOvI+uv6+4RWn+rLm0EHj7YWrCmCX6XTy73+2cXIEuJbbfwFTbm/Te3Wu",
+ "+tKuNU81TUidUHpUgunWrTgmUWEsJ56TDVuVtvbUCeuR1asx4kC/7td0cpYddGHG8ipO7CixYxev4jWc",
+ "dqpJNYVHrBSKNXndY+W9RoYYXmCFriBtVn8sH9+zhlRjMv8mbkECHJJEy0wWFAz9d/qpAXW6jsR0Wad2",
+ "pZrqZ/Dfc8f3XoMFLxpt9vOj8YmVTuvoNOTTmA15CdzV7Gy/8xgdbb5YQKrZes/ru3+sgAcvu6beLmNr",
+ "bweP8VgdvYzJWw63OjYA7XoctxOeIInircEZentzCdt7irSoIZqOfeqv2pvk7UAMIHdIDIkIFYv+sIZk",
+ "55BnqqYMxIKPtrLdocmANljJKXhLesO5PEmai6N5X7pjyngpmVFzma4HvbrGQNyhB3r9ShTD+scrLPyh",
+ "6iqLPu9HqKWTs352xCuXNwTfSta+E59BBJT/zT+MtrPk7BLCWlPoqbqiMvMtoqYXb9VJdtxHvVd1vopC",
+ "F+hFPTNrYmP776gi+bYwAjrNhREjkqEw8nY4ah3LcU/ZoBub/h0DbQ1cC5CuJh/Kv7lQkGjhY2l3wbEL",
+ "FTay6EZIUIM5Li1wg5ln3jWpdTDXL8VMM9QFFIULJBIKaqCTQQKc4Tl3Iful/e4fDvlcr3stTDW97i86",
+ "4KOimeohMaT6BXG35f4HSTcxNjHObd1nFcuGw0G2vSGlFFmV2gs6PBi1QW50rqkdrCRqp0n7q+zoCMGr",
+ "zkvYzqwS5Ks1+B0MgbaSkwU9yKLQ2eQ7Nb+pGNzLOwHvS1quppNSiDwZcHac9VP4dCn+kqWXkBFzU/jo",
+ "wYHKN+Q+2thrb/bVautT1pQlcMgeHBFyym28tndst3NIdybn9/Su+Tc4a1bZrFrOqHb0gccDXzHflbwl",
+ "N/PD7OZhCgyru+VUdpA9CWI2A+mDJL2K1IE6GquV913N3do8DVFZKGIySVN2Zk+cTB0i01T+aMJk+tJB",
+ "nourBKkoqfN/xXQO067NJH3G06abwfYcgngbqtwFuiUrmpFUSAlp2CP+xMECVQgJSS4w/CbmGVxoIw8V",
+ "GNfMSS6WRJRGzbVp9LwPJVqWJpjLPrO1PRPrqBlIZADKPat109jG/Xl2VK85vDLOxSpib0FEeywfXP7G",
+ "EcrBVSsCMEcQ6H5b02msuk97Xd36UEPV2rQoWBpH958rymQwNmRP7aLI+mpydKWV/KvAAVxFXba7PaS2",
+ "Dt18rJ+0zpk88lgEAAx7TlswjPKfHgrGAus6JjSC5LNaap22yu6yztn3+ewsjafUaq0rIGbsSoJ7pWYL",
+ "0HUq55RUr/wtZpr3dUujp4DCJ2S2/AdV1hLiLTKu+l1XPBBlksMaWg5l93SuSlNQiq0hrJxnO5MMoET7",
+ "ZFdqjnlKQy7XEaXc2pPA1zYGu1HZyiLW7hTZIzhFxbwNT+wxUWOPkoFozbKKtvCnblGLbKgMWYQNe1hH",
+ "coqDmUR8cbtYxN7YBqT56Lnk8dCG8OVmbRTB2bLaeGqJsDnZqqRXfFiJiNidan/77ddBcDCiOi+pB698",
+ "We/KTRXIQcrYRRi9+oFRmUOBr/8aJj3x4pbrG5GxrKmLqcgATDXnGaP3oIkOC5oVdEsytliAtMZ8pSnP",
+ "qMzC5oyTFKSmzGg2W3VzsdZAKyuY7pVsDXfFQT2Dicm4aJeygORbpzLcQupEz01E4rRXrRZDJRJ7uxJ/",
+ "TkA3RrrGuKoBInAPoVG2tgdMcBSQSEEv4cB5FPsddk+D6Umc7U8LnHXMFDFf6w1zq41i3f0whMjtFhRD",
+ "3O0ZClMvNm+6pI1mQUuyvyC7NP5Dc3GOK8voO+wBL3QYBoUZve3GgfOFH0f9UCMlWMrHIUpoLX+fD9It",
+ "sJE0gi1yjEBrsIlwbUB9e18CB7N6Wftth2qIdt27mGdRcFvkr+cWtrzJVu0LCMecBbmm+ed37WICzlPE",
+ "B2Tvho3BoW8wRLJFpbrZy4TXdNTcgR/w7qbmb9EV/Q8wexTVSt1QToSpxXofzIM3C82t4WLhS3itgZMr",
+ "HNPGsT16Tubu5XYpIWWqKxpd+eoatSsMi0251yAbvcf3tm+dPwt9CzJeeE2DvGky9aOOv+QNhM0R/cJM",
+ "ZeDkRqk8Rn09sojgL8ajwhRqe66Ly1aAm6180nm5ISTccaBbELJ+YKBbPznc2OXZYC5z6VQK+uscfVu3",
+ "cBu5qJu1jY3S7CN3Vzr3McGV8SoNpjtGd1qEYIkTgqCSXx/9SiQssIahIA8f4gQPH05d018ftz+b4/zw",
+ "YVQ6+2xxnRZHbgw3b4xifh566Wdfsw08Ku3sR8XybB9htJ4IN1VA8RHsLy4RwRepQ/qLjTXpH1VXC+4W",
+ "AXIWMZG1tiYPpgoe/4549+u6RV75oh8nrSTTW8yP6O0H7JdoBOp3dTSTi4ar9UN392lxCXWGzSb2qVL+",
+ "dv1O0BzvI6u2cnMLifyIfLOhRZmDOyhf3Zv/FZ787Wl2/OTRX+d/O352nMLTZy+Oj+mLp/TRiyeP4PHf",
+ "nj09hkeL5y/mj7PHTx/Pnz5++vzZi/TJ00fzp89f/PWe4UMGZAvoxGfjmfxfLNabnL49Sy4MsA1OaMm+",
+ "h62tC2jI2FccpCmeRCgoyycn/qf/7U/YUSqKZnj/68Ql+5istC7VyWx2dXV1FHaZLTHYIdGiSlczP0+v",
+ "JOHp27PaS2StQLij9p2st+55UjjFb+++Ob8gp2/PjoJ69SeT46Pjo0dY3rwETks2OZk8wZ/w9Kxw32eO",
+ "2CYnn66nk9kKaI6xgeaPArRkqf8kgWZb9391RZdLkEeuDKP5af145sWK2ScX9HG969ssrGgy+9SKjcn2",
+ "9MSKB7NPPpHf7tatTHkuJijoMBKKXc1mc8wPMrYpqKDx8FJQ2VCzTyguD/4+cwkN4h9RbbHnYeYDyOIt",
+ "W1j6pDcG1k6PlOp0VZWzT/gfpM9ryzByiIWL2TwAlDTNp4RpQudCYgY9na4Mj/Cpu5gKWk6Qai3Bn2WG",
+ "0E2vlxYCn6TTZi0/ed93Z+FAxI+EXMGQfHNoWzM1fBktUEEi7frWabVv7p73x8mLj58eTR8dX//F3C3u",
+ "z2dPrkf6pV7W45Lz+uIY2fAj5r1CAySe5cfHx7eoCH/KA/TbTaof8kSKutudGHYZuK3qDERqZOzJz9MZ",
+ "PlZi9no6eXrginfaklqPmyKlYr+mGfEuf5z70eeb+4xj1K3h8cTeYdfTybPPufozbkie5gRbBgkX+1v/",
+ "E7/k4or7lkbgqIqCyq0/xqrFFIjbbLzW6FJhHIZka4pyHhe8VUVu8hFjf2JhFwP8Rml6A35zbnr9m998",
+ "Ln6Dm3QX/KY90B3zm8cHnvk//4r/zWH/bBz23LK7W3FYJ/DZF+EzveEzdGfNPrUEVPe5J6C2f2+6hy3W",
+ "hcjAy6BisbC55nd9nn2y/wYTwaYEyQrgNgen+9W+lpthBsht/+ctT6M/9tfRLfQa+3n2qV1oqIUgtap0",
+ "Jq5s2rPolYXZ9GnuUu+iubZW/bQgfoDmaRL50b2mzrdoo2YZEIppnkSlG93cdK7DRGrviRmhKQK9ZBwn",
+ "QDM4zmJzTNMg6F9BKrgtmdq5Hh1kb0QG/esRL8DfKpDb5gZ0ME6mLf7oCDyS0fnW102fnV0fRv5orre+",
+ "pj5x1HVSW3/PrijT5hJ1b4QQo/3OGmg+cwmBOr82b/B7XzCxQPBjGOsS/XVWF0WIfuyqwrGvThUcaORd",
+ "7/5zYxYLzUxIErWB6f1Hs7OYctdRS2M1OZnNMO5+JZSeTa6nnzoWlfDjx3ozfZ7EelOvP17/TwAAAP//",
+ "Qz/sGdnLAAA=",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go
index 7c89fbd8b..8eb9f99c1 100644
--- a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go
+++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go
@@ -54,6 +54,21 @@ type ServerInterface interface {
// Get a proof for a transaction in a block.
// (GET /v2/blocks/{round}/transactions/{txid}/proof)
GetTransactionProof(ctx echo.Context, round uint64, txid string, params GetTransactionProofParams) error
+ // Get a LedgerStateDelta object for a given transaction group
+ // (GET /v2/deltas/txn/group/{id})
+ GetLedgerStateDeltaForTransactionGroup(ctx echo.Context, id string, params GetLedgerStateDeltaForTransactionGroupParams) error
+ // Get a LedgerStateDelta object for a given round
+ // (GET /v2/deltas/{round})
+ GetLedgerStateDelta(ctx echo.Context, round uint64, params GetLedgerStateDeltaParams) error
+ // Get LedgerStateDelta objects for all transaction groups in a given round
+ // (GET /v2/deltas/{round}/txn/group)
+ GetTransactionGroupLedgerStateDeltasForRound(ctx echo.Context, round uint64, params GetTransactionGroupLedgerStateDeltasForRoundParams) error
+ // Returns the timestamp offset. Timestamp offsets can only be set in dev mode.
+ // (GET /v2/devmode/blocks/offset)
+ GetBlockTimeStampOffset(ctx echo.Context) error
+ // Given a timestamp offset in seconds, adds the offset to every subsequent block header's timestamp.
+ // (POST /v2/devmode/blocks/offset/{offset})
+ SetBlockTimeStampOffset(ctx echo.Context, offset uint64) error
// Get the current supply reported by the ledger.
// (GET /v2/ledger/supply)
GetSupply(ctx echo.Context) error
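
For orientation, the five routes added to ServerInterface above are plain REST endpoints once registered. The following client-side sketch, which is not part of the generated code, exercises two of them over HTTP; the node address, round number, and token value are placeholders, and the X-Algo-API-Token header is algod's usual API authentication header.

package main

import (
	"fmt"
	"io"
	"net/http"
)

// get issues an authenticated GET against an algod node.
func get(url string) ([]byte, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("X-Algo-API-Token", "your-api-token") // placeholder token
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

func main() {
	const node = "http://localhost:8080" // placeholder algod address

	// Ledger state deltas for every transaction group in round 1000.
	if body, err := get(node + "/v2/deltas/1000/txn/group?format=json"); err == nil {
		fmt.Println(string(body))
	}

	// Current dev-mode timestamp offset; only meaningful on a dev-mode node.
	if body, err := get(node + "/v2/devmode/blocks/offset"); err == nil {
		fmt.Println(string(body))
	}
}
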
@@ -387,6 +402,116 @@ func (w *ServerInterfaceWrapper) GetTransactionProof(ctx echo.Context) error {
return err
}
+// GetLedgerStateDeltaForTransactionGroup converts echo context to params.
+func (w *ServerInterfaceWrapper) GetLedgerStateDeltaForTransactionGroup(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "id" -------------
+ var id string
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "id", runtime.ParamLocationPath, ctx.Param("id"), &id)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetLedgerStateDeltaForTransactionGroupParams
+ // ------------- Optional query parameter "format" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetLedgerStateDeltaForTransactionGroup(ctx, id, params)
+ return err
+}
+
+// GetLedgerStateDelta converts echo context to params.
+func (w *ServerInterfaceWrapper) GetLedgerStateDelta(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetLedgerStateDeltaParams
+ // ------------- Optional query parameter "format" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetLedgerStateDelta(ctx, round, params)
+ return err
+}
+
+// GetTransactionGroupLedgerStateDeltasForRound converts echo context to params.
+func (w *ServerInterfaceWrapper) GetTransactionGroupLedgerStateDeltasForRound(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "round" -------------
+ var round uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "round", runtime.ParamLocationPath, ctx.Param("round"), &round)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter round: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Parameter object where we will unmarshal all parameters from the context
+ var params GetTransactionGroupLedgerStateDeltasForRoundParams
+ // ------------- Optional query parameter "format" -------------
+
+ err = runtime.BindQueryParameter("form", true, false, "format", ctx.QueryParams(), &params.Format)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter format: %s", err))
+ }
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetTransactionGroupLedgerStateDeltasForRound(ctx, round, params)
+ return err
+}
+
+// GetBlockTimeStampOffset converts echo context to params.
+func (w *ServerInterfaceWrapper) GetBlockTimeStampOffset(ctx echo.Context) error {
+ var err error
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.GetBlockTimeStampOffset(ctx)
+ return err
+}
+
+// SetBlockTimeStampOffset converts echo context to params.
+func (w *ServerInterfaceWrapper) SetBlockTimeStampOffset(ctx echo.Context) error {
+ var err error
+ // ------------- Path parameter "offset" -------------
+ var offset uint64
+
+ err = runtime.BindStyledParameterWithLocation("simple", false, "offset", runtime.ParamLocationPath, ctx.Param("offset"), &offset)
+ if err != nil {
+ return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter offset: %s", err))
+ }
+
+ ctx.Set(Api_keyScopes, []string{""})
+
+ // Invoke the callback with all the unmarshalled arguments
+ err = w.Handler.SetBlockTimeStampOffset(ctx, offset)
+ return err
+}
+
// GetSupply converts echo context to params.
func (w *ServerInterfaceWrapper) GetSupply(ctx echo.Context) error {
var err error
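Every wrapper above follows the same generated shape: bind the typed path parameter with runtime.BindStyledParameterWithLocation, record the API-key scopes on the echo context, bind the optional format query parameter, and delegate to the ServerInterface handler, which therefore only ever sees typed arguments. As a hedged sketch of what an implementation of the two dev-mode methods could look like (the struct, its field, and the response shapes are hypothetical; the real handlers live elsewhere under daemon/algod/api/server/v2):

    package handlers // illustrative package name

    import (
        "net/http"

        "github.com/labstack/echo/v4"
    )

    // devModeHandlers is a hypothetical fragment of a ServerInterface implementation.
    type devModeHandlers struct {
        offset *uint64 // current timestamp offset; nil until one is set
    }

    // GetBlockTimeStampOffset reports the stored offset, or an error if unset.
    func (h *devModeHandlers) GetBlockTimeStampOffset(ctx echo.Context) error {
        if h.offset == nil {
            return echo.NewHTTPError(http.StatusBadRequest, "no timestamp offset set")
        }
        return ctx.JSON(http.StatusOK, map[string]uint64{"offset": *h.offset})
    }

    // SetBlockTimeStampOffset stores the offset received as a typed path parameter.
    func (h *devModeHandlers) SetBlockTimeStampOffset(ctx echo.Context, offset uint64) error {
        h.offset = &offset
        return ctx.NoContent(http.StatusOK)
    }
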
@@ -557,6 +682,11 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
router.GET(baseURL+"/v2/blocks/:round/hash", wrapper.GetBlockHash, m...)
router.GET(baseURL+"/v2/blocks/:round/lightheader/proof", wrapper.GetLightBlockHeaderProof, m...)
router.GET(baseURL+"/v2/blocks/:round/transactions/:txid/proof", wrapper.GetTransactionProof, m...)
+ router.GET(baseURL+"/v2/deltas/txn/group/:id", wrapper.GetLedgerStateDeltaForTransactionGroup, m...)
+ router.GET(baseURL+"/v2/deltas/:round", wrapper.GetLedgerStateDelta, m...)
+ router.GET(baseURL+"/v2/deltas/:round/txn/group", wrapper.GetTransactionGroupLedgerStateDeltasForRound, m...)
+ router.GET(baseURL+"/v2/devmode/blocks/offset", wrapper.GetBlockTimeStampOffset, m...)
+ router.POST(baseURL+"/v2/devmode/blocks/offset/:offset", wrapper.SetBlockTimeStampOffset, m...)
router.GET(baseURL+"/v2/ledger/supply", wrapper.GetSupply, m...)
router.GET(baseURL+"/v2/stateproofs/:round", wrapper.GetStateProof, m...)
router.GET(baseURL+"/v2/status", wrapper.GetStatus, m...)
@@ -572,235 +702,253 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9a3Mbt5LoX0Fxt8qP5VB+Zk9Uldor23loYzsuS8nuObFvAs40SRwNgTkARiLj6/9+",
- "Cw1gBjODIYcSLdmJPtni4NFoNBqNfn4YpWJZCA5cq9Hhh1FBJV2CBol/0TQVJdcJy8xfGahUskIzwUeH",
- "/htRWjI+H41HzPxaUL0YjUecLqFuY/qPRxL+VTIJ2ehQyxLGI5UuYEnNwHpdmNbVSKtkLhI3xJEd4vjF",
- "6OOGDzTLJCjVhfInnq8J42leZkC0pFzR1HxS5ILpBdELpojrTBgnggMRM6IXjcZkxiDP1MQv8l8lyHWw",
- "Sjd5/5I+1iAmUuTQhfO5WE4ZBw8VVEBVG0K0IBnMsNGCamJmMLD6hloQBVSmCzITcguoFogQXuDlcnT4",
- "60gBz0DibqXAzvG/MwnwBySayjno0ftxbHEzDTLRbBlZ2rHDvgRV5loRbItrnLNz4MT0mpBXpdJkCoRy",
- "8va75+Tx48dfm4UsqdaQOSLrXVU9e7gm2310OMqoBv+5S2s0nwtJeZZU7d9+9xznP3ELHNqKKgXxw3Jk",
- "vpDjF30L8B0jJMS4hjnuQ4P6TY/Ioah/nsJMSBi4J7bxXjclnP9GdyWlOl0UgnEd2ReCX4n9HOVhQfdN",
- "PKwCoNG+MJiSZtBfHyRfv//wcPzwwcd/+/Uo+Yf78+njjwOX/7wadwsGog3TUkrg6TqZS6B4WhaUd/Hx",
- "1tGDWogyz8iCnuPm0yWyeteXmL6WdZ7TvDR0wlIpjvK5UIQ6MspgRstcEz8xKXlu2JQZzVE7YYoUUpyz",
- "DLKx4b4XC5YuSEqVHQLbkQuW54YGSwVZH63FV7fhMH0MUWLguhQ+cEGfLzLqdW3BBKyQGyRpLhQkWmy5",
- "nvyNQ3lGwgulvqvUbpcVOV0AwcnNB3vZIu64oek8XxON+5oRqggl/moaEzYja1GSC9ycnJ1hf7cag7Ul",
- "MUjDzWnco+bw9qGvg4wI8qZC5EA5Is+fuy7K+IzNSwmKXCxAL9ydJ0EVgisgYvpPSLXZ9v8++ek1EZK8",
- "AqXoHN7Q9IwAT0UG2YQczwgXOiANR0uIQ9Ozbx0Ortgl/08lDE0s1byg6Vn8Rs/ZkkVW9Yqu2LJcEl4u",
- "pyDNlvorRAsiQZeS9wFkR9xCiku66k56Kkue4v7X0zZkOUNtTBU5XSPClnT1zYOxA0cRmuekAJ4xPid6",
- "xXvlODP3dvASKUqeDRBztNnT4GJVBaRsxiAj1SgbIHHTbIOH8d3gqYWvABw/SC841SxbwOGwitCMOd3m",
- "CynoHAKSmZCfHXPDr1qcAa8InUzX+KmQcM5EqapOPTDi1JslcC40JIWEGYvQ2IlDh2Ewto3jwEsnA6WC",
- "a8o4ZIY5I9BCg2VWvTAFE25+73Rv8SlV8NWTvju+/jpw92eivesbd3zQbmOjxB7JyNVpvroDG5esGv0H",
- "vA/DuRWbJ/bnzkay+am5bWYsx5von2b/PBpKhUyggQh/Nyk251SXEg7f8fvmL5KQE015RmVmflnan16V",
- "uWYnbG5+yu1PL8WcpSds3oPMCtbogwu7Le0/Zrw4O9ar6LvipRBnZREuKG08XKdrcvyib5PtmLsS5lH1",
- "2g0fHqcr/xjZtYdeVRvZA2Qv7gpqGp7BWoKBlqYz/Gc1Q3qiM/mH+acoctNbF7MYag0duysZ1QdOrXBU",
- "FDlLqUHiW/fZfDVMAOxDgtYtDvBCPfwQgFhIUYDUzA5KiyLJRUrzRGmqcaR/lzAbHY7+7aDWvxzY7uog",
- "mPyl6XWCnYzIasWghBbFDmO8MaKP2sAsDIPGT8gmLNtDoYlxu4mGlJhhwTmcU64n9ZOlwQ+qA/yrm6nG",
- "t5V2LL5bT7BehBPbcArKSsC24R1FAtQTRCtBtKJAOs/FtPrh7lFR1BjE70dFYfGB0iMwFMxgxZRW93D5",
- "tD5J4TzHLybk+3BsFMUFz9fmcrCihrkbZu7WcrdYpVtya6hHvKMIbqeQE7M1Hg1GzN8HxeGzYiFyI/Vs",
- "pRXT+AfXNiQz8/ugzl8GiYW47ScufGg5zNk3Dv4SPG7utiinSzhO3TMhR+2+lyMbM0qcYC5FKxv30467",
- "AY8VCi8kLSyA7ou9SxnHR5ptZGG9IjcdyOiiMAdnOKA1hOrSZ23reYhCgqTQguFZLtKzH6ha7OHMT/1Y",
- "3eOH05AF0AwkWVC1mIxiUkZ4vOrRhhwx0xAf+GQaTDWplriv5W1ZWkY1DZbm4I2LJRb12A+ZHsjI2+Un",
- "/A/NiflszrZh/XbYCTlFBqbscXZGhsy89u0Dwc5kGqAWQpClfeAT8+reCcrn9eTxfRq0R99anYLbIbcI",
- "3CGx2vsxeCZWMRieiVXnCIgVqH3QhxkHxUgNSzUAvhcOMoH779BHpaTrLpJx7CFINgs0oqvC08DDG9/M",
- "Uitnj6ZCXo77tNgKJ7XKmVAzasB8xy0kYdOySBwpRtRWtkFroNrKt5lptIePYayBhRNNPwEWlBl1H1ho",
- "DrRvLIhlwXLYA+kvokx/ShU8fkROfjh6+vDRb4+efmVIspBiLumSTNcaFLnr3mZE6XUO97orw9dRmev4",
- "6F898YrK5rixcZQoZQpLWnSHsgpQKwLZZsS062KtiWZcdQXgkMN5CoaTW7QTq9s3oL1gykhYy+leNqMP",
- "YVk9S0YcJBlsJaZdl1dPsw6XKNey3MdTFqQUMqJfwyOmRSry5BykYiJiTXnjWhDXwou3Rft3Cy25oIqY",
- "uVH1W3IUKCKUpVd8ON+3Q5+ueI2bjZzfrjeyOjfvkH1pIt9rEhUpQCZ6xUkG03LeeAnNpFgSSjLsiHf0",
- "96BP1jxFrdo+iLT/mbZkHFX8as3T4M1mNiqHbN7YhKu/zdpY8fo5O9UdFQHHoOMlfsZn/QvINd27/NKe",
- "IAb7c7+RFliSmYb4Cn7J5gsdCJhvpBCz/cMYmyUGKH6w4nlu+nSF9NciA7PYUu3hMq4Hq2nd7GlI4XQq",
- "Sk0o4SID1KiUKn5N91ju0WSIlk4d3vx6YSXuKRhCSmlpVlsWBO14Hc5Rd0xoaqk3QdSoHitGZX6yrex0",
- "1iqcS6CZedUDJ2LqTAXOiIGLpGiE1P6ic0JC5Cw14CqkSEEpyBKnotgKmm9nmYjegCcEHAGuZiFKkBmV",
- "Vwb27HwrnGewTtBkrsjdH39R924AXi00zbcgFtvE0Fs9+Jw9qAv1sOk3EVx78pDsqATiea55XRoGkYOG",
- "PhTuhJPe/WtD1NnFq6PlHCRaZj4pxftJrkZAFaifmN6vCm1Z9DiCuYfOKVui3o5TLhSkgmcqOlhOlU62",
- "sWXTqPEaMysIOGGME+PAPULJS6q0tSYynqESxF4nOI8VUMwU/QD3CqRm5F+8LNodOzX3IFelqgRTVRaF",
- "kBqy2Bo4rDbM9RpW1VxiFoxdSb9akFLBtpH7sBSM75BlV2IRRHWldHfm9u7iUDVt7vl1FJUNIGpEbALk",
- "xLcKsBs6w/QAwlSNaEs4TLUop/LAGY+UFkVhuIVOSl7160PTiW19pH+u23aJi+r63s4EKPTBce0d5BcW",
- "s9YNakHNExpHJkt6ZmQPfBBbs2cXZnMYE8V4CskmyjfH8sS0Co/A1kNaFnNJM0gyyOm6O+jP9jOxnzcN",
- "gDteP3yEhsT6s8Q3vaZk7z6wYWiB46mY8EjwC0nNETQvj5pAXO8tI2eAY8eYk6OjO9VQOFd0i/x4uGy7",
- "1ZER8TY8F9rsuCUHhNgx9CHw9qChGvnymMDOSf0sa0/xd1BugkqM2H2SNai+JdTj77SAHmWa8xQOjkuL",
- "u7cYcJRr9nKxLWyk78T2aPbeUKlZygp86vwI672//NoTRO1NJANNWQ4ZCT7YV2AR9ifWEaM95uVegoOU",
- "MF3wO1qYyHJyplDiaQJ/Bmt8cr+xHn6ngV/gHp6ykVHN9UQ5QUC935CRwMMmsKKpztdGTtMLWJMLkEBU",
- "OV0yra3nbvOlq0WRhANEFdwbZnTWHOsd53dgiHnpBIcKltfdivHIPgk2w3faehc00OGeAoUQ+QDlUQcZ",
- "UQgGGf5JIcyuM+dE7N1IPSU1gHRMG0151e1/RzXQjCsgfxclSSnHF1epoRJphEQ5AeVHM4ORwKo5nYm/",
- "xhDksAT7kMQv9++3F37/vttzpsgMLrznvWnYRsf9+6jGeSOUbhyuPagKzXE7jlwfqPnHe885L7R4ynYT",
- "sxt5yE6+aQ1emQvMmVLKEa5Z/pUZQOtkroasPaSRYeZ1HHeQUj8YOrZu3PcTtixzqvdhvtgoj1bvCbZc",
- "QsaohnxNCgkpWO9qI2ApC4sBjVi/q3RB+RzlainKuXP8seMgYyyV1WDIkneGiAofesWTuRRlEWOUztnT",
- "O9gbsQOoefkEiMTOVs6/oNV8LqZiyA3mER7szvdmzD6rwnjU+zA0SD2vH4YWOc0ogTgWMOwhUWWaAkRd",
- "gGNPrmqprWjIOr7FDWjEhlJaHyhCU13SPKQ6cjwjlK+bYZKU5cpwQaYItjOda7/asV2bj2GZ0dzaZiNB",
- "FeFJaUh8wc7XKG2jYqDdAYnESENdyggJ0BwvQ8afRodfDx2Dsjtx4HRVf+zzuzLv73y9BzHIDkQkFBIU",
- "Xlqh3krZr2IWxj65W02tlYZlV7Vvu/7Ww2je9j4gBc8Zh2QpOKyj4b6Mwyv8GGUceHH2dEYRpq9v+1XS",
- "gL8FVnOeIdR4Vfzibge86E3lcLiHzW+P27LqhFFfqLWEvCCUpDlDnabgSssy1e84Ra1JcNgijhn+fdiv",
- "R3vum8QVdxG9mhvqHafolFPpUqLG5BlEFAffAXh1mirnc1At/klmAO+4a8U4KTnTONfS7FdiN6wAid4R",
- "E9tySdeGBaLa7w+QgkxL3eTJGHmitGGX1sRkpiFi9o5TTXIwb+pXjJ+ucDhvovU0w0FfCHlWYSF+hcyB",
- "g2IqiTuQfG+/om+fW/7C+flhpLD9bI0SZvw6PGWNSpU6+vX/3v2vw1+Pkn/Q5I8Hydf/cfD+w5OP9+53",
- "fnz08Ztv/l/zp8cfv7n3X/8e2ykPeywuwkF+/MI91o5foEReWyU6sF+bRnrJeBIlstD23qItchdjAB0B",
- "3Wvqa/QC3nG94oaQzmnOMiNyXYYc2iyucxbt6WhRTWMjWvoZv9Yd5dwrcBkSYTIt1njpa7zrcxWPQEIz",
- "mQsqwvMyK7ndSi/oWgd77/siZuMqyswmoDgkGIK0oN5xy/356OlXo3EdOlR9H41H7uv7CCWzbBWVDmEV",
- "e764A4IH444iBV0r6BFAEfaom4/1NgiHXYJ596oFK66fUyjNpnEO592WnRpkxY+59Sc25weNbmunyxez",
- "64dbSyOHF3oRC0xvSArYqt5NgJYjRCHFOfAxYROYtNUQmXmaOYejHOgMA6TxoSeGhGFU58ASmqeKAOvh",
- "Qga99WP0g8Kt49YfxyN3+au9y+Nu4Bhc7TkrC5v/Wwty5/tvT8mBY5jqjo1VtEMH0WWRV6sLoGi4yBhu",
- "ZtNx2GDNd/wdfwEzxpn5fviOZ1TTgylVLFUHpQL5jOaUpzCZC3LoYzJeUE3f8Y6k1ZsxJ4iGIUU5zVlK",
- "zkKJuCZPmwWhO8K7d7/SfC7evXvf8Rboyq9uqih/sRMkF0wvRKkTF8OdSLigMmaNUVUML45skzRsmnVM",
- "3NiWFbsYcTd+nOfRolDtWL7u8osiN8sPyFC5SDWzZURpIb0sYgQUCw3u72vhLgZJL7wKo1SgyO9LWvzK",
- "uH5PknflgwePgTSC2353V76hyXUBgxUZvbGGbf0FLty+a2ClJU0KOo9Zfd69+1UDLXD3UV5e4iM7zwl2",
- "awTVeadhHKpegMdH/wZYOHYOEMLFndhePl9PfAn4CbcQ2xhxozZFX3a/gjC7S29XK1Svs0ulXiTmbEdX",
- "pQyJ+52p0njMjZDl/QMUm6MPpst4MgWSLiA9c6koYFno9bjR3bugOEHTsw6mbJISGySDYfKoM58CKYuM",
- "OlG8rUGarokCrb0T6Fs4g/WpqKPsdwlQbsbLqr6DipQaSJeGWMNj68Zob77zc0IVV1H4sFOMP/JkcVjR",
- "he/Tf5CtyLuHQxwjikY8Zx8iqIwgwhJ/DwousVAz3pVIP7Y888qY2psvkrDE837imtSPJ+eSFK4GFdz2",
- "+xIw45G4UGRKjdwuXLIeGxMacLFS0Tn0SMih2WJg5GXD1IGDbLv3ojedmLUvtM59EwXZNk7MmqOUAuaL",
- "IRV8zLQc0fxM1jLmjACYg88hbJqjmFR57FmmQ2XDfGSTivWBFidgkLwWODwYTYyEks2CKp9HCNMt+bM8",
- "SAb4hDHOmzJbhAr9IKdSpV/3PLd9TjuvS5ffwie18JkswqflgKwURsJHt+3YdgiOAlAGOcztwm1jTyh1",
- "vHW9QQaOn2aznHEgScwdiyolUmYTQdXXjJsDjHx8nxCrAiaDR4iRcQA2WnxxYPJahGeTz3cBkrt4cerH",
- "Rltx8DfEQ1usg7IReURhWDjrMSClngNQ58NX3V8tT1IchjA+JobNndPcsDn34qsH6SRYQLG1lU7B+Rzc",
- "6xNnN2jg7cWy05rsVXSZ1YQykwc6LtBtgHgqVomNbYtKvNPV1NB71GcbI+1iB9OmsrijyFSs0I8Frxbr",
- "I7wFln44PBjBC3/FFNIr9uu7zS0wm6bdLE3FqFAhyTh1XkUufeLEkKl7JJg+crkbZKe4FAAtZUed6tU9",
- "frc+UpviSfcyr2+1cZ11yYfDxI5/3xGK7lIP/rpamCqfxJu2xBLVUzTdMZqpNAIRMkb0hk10jTRdU5CC",
- "HPBRkDSEqOQsZrozbxvAG+fEdwuUF5iwg/L1vcDHR8KcKQ21Et27JNyEepJinjAhZv2r04WcmfW9FaK6",
- "pmwiGuzYWOa1rwB9ZGdMKp2gBSK6BNPoO4WP6u9M07is1PQislk1WRbnDTjtGayTjOVlnF7dvD++MNO+",
- "rliiKqfIbxm3viFTzAIb9S3cMLV1P9244Jd2wS/p3tY77DSYpmZiacilOccXci5anHcTO4gQYIw4urvW",
- "i9INDDIICe1yx0BusocTQ0Inm7SvncOU+bG3uo34wNS+O8qOFF1LoDDYuAqGZiIjljAdJFHtxmr2nAFa",
- "FCxbtXShdtTeFzPdSeHhU0+1sIC76wbbgoFA7xkLF5GgmlnGagHfpsNtJPmYDMLMaTMXWMgQwqmY8snc",
- "u4iqwsm24eoUaP4jrH8xbXE5o4/j0dVUpzFcuxG34PpNtb1RPKNp3qrSGpaQHVFOi0KKc5onTsHcR5pS",
- "nDvSxOZeH33NrC6uxjz99ujlGwf+x/EozYHKpBIVeleF7YovZlU2oVnPAfHJos2bz8vsVpQMNr/KwhQq",
- "pS8W4LLuBtJoJz1gbXAIjqJTUs/iHkJbVc7ONmKXuMFGAkVlIqnVd9ZC0rSK0HPKcq8389D2ePPg4obl",
- "mIxyhXCAK1tXAiNZsld20znd8dNRU9cWnhTOtSEv8NKmvlZE8LYJHd2L14Wzui8pJvezWpEuc+LlEjUJ",
- "icpZGtex8qkyxMGt7cw0Jti4Rxg1I5asxxTLSxaMZZqpAQ/dFpDBHFFk+kSRfbibClfWpOTsXyUQlgHX",
- "5pPEU9k6qJhN0Wnbu9epkR26c7mBrYa+Hv4qMkaY2LJ94yEQmwWM0FLXAfdF9WT2C600UuhuXZskdjD4",
- "hzN2rsQNxnpHH46arfPiomlxC6uQdPmfIQybjnp7CRT/eHUZNnvmiJY0YSqZSfEHxN95+DyOhOL4VJ4M",
- "vVz+AD7A57zW7tSVWerZe7e7T7oJtVBNJ4UeqsedD8xymFPQa6gpt1ttKww0fN3iBBN6lR7Y8WuCcTB3",
- "PHFzejGlsYSLRsgwMB3VBuCGLl0L4jt73KsqsMHOTgJbctWW2SjrAmQdJdfN2HJJgcFOO1hUqCUDpNpQ",
- "Jhhb+1+uRGSYkl9QbgtVmH72KLneCqzyy/S6EBJzJKi42j+DlC1pHpccsrSr4s3YnNkaDKWCIMm/G8jW",
- "t7FU5AolVOE6DjXHM/JgHFQacbuRsXOm2DQHbPHQtphShZy8UkRVXczygOuFwuaPBjRflDyTkOmFsohV",
- "glRCHT5vKuPVFPQFACcPsN3Dr8ldNNspdg73DBbd/Tw6fPg1Kl3tHw9iF4CrobGJm2TITv7HsZM4HaPd",
- "0o5hGLcbdRINJ7dFtPoZ14bTZLsOOUvY0vG67WdpSTmdQ9xTZLkFJtsXdxMVaS288MxWgFFaijVhOj4/",
- "aGr4U4/3uWF/FgySiuWS6aUz7iixNPRUZ/C3k/rhbDkZl3zVw+U/oo208Cai1iPyepWm9n6LrRot2a/p",
- "EppoHRNqE2PkrPZe8CmhybHPu4PZaKsktBY3Zi6zdBRz0JlhRgrJuMaHRalnyd9IuqCSpob9TfrATaZf",
- "PYlk4G1mguS7AX7teJegQJ7HUS97yN7LEK4vucsFT5aGo2T36miP4FT2GnPjZrs+2+HmoYcKZWaUpJfc",
- "yga50YBTX4nw+IYBr0iK1Xp2osedV3btlFnKOHnQ0uzQz29fOiljKWQsmV593J3EIUFLBufouxffJDPm",
- "FfdC5oN24SrQ36zlwYucgVjmz3LsIfBMRF6nPit0pUl3vuoR7UDfMTUfDBlM3VBj0szAe/1GP6987hqf",
- "zBcPK/7RBvaGtxSR7FfQs4lBdvDodmbV98D+TckzsRq6qa0T4jf2M0BNFCUly7Nf6qjMVvJ1SXm6iNqz",
- "pqbjb3WZqGpx9n6K5qxbUM4hjw5nZcHfvMwYkWr/KYbOs2R8YNt2Pni73NbiasCbYHqg/IQGvUznZoIQ",
- "q82At8qhOp+LjOA8dYK0mnt26wgE2Z7/VYLSseAh/GCdulBvad67NtkwAZ7ha3FCvreVYBdAGulv8JVW",
- "ZRFwqW+tQr0sckGzMSZyOP326CWxs9o+ttiJTXY8x0dKcxUtfVWQ+3GYe7CvWxIPXRg+zmZfarNqpTEb",
- "ldJ0WcSCQ02LU98AI1BDHT4+X0LsTMiLoKajjSM1Qxh6mDG5NC+uajQruyBNmP9oTdMFPskaLLWf5Idn",
- "6fZUqYLKeFWFmyohIp47A7dL1G3zdI+JMO/mC6ZsAVA4h2Y8ahWc7VQCPj61uTxZcm4pJSp7bEoecBm0",
- "e+Cso4ZX80chayF+R4HcJrnfNWn5CfaKJmhqZ0DvlMSz0Y1V5RJf2DmlXHCWYnqk2NXsKoUOsYENyCTV",
- "VrL6I+5OaORwRfOuV25yDou9mdg9I3SI6yrhg69mUy112D81lqRcUE3moJXjbJCNffkApwdkXIFLcIl1",
- "ZQM+KWTDrogcMmqqTiqTxo5khGExPQ+778y31+7Zj/7iZ4yjgO/Q5lzTraYOCxlq8ypgmswFKLeeZmyw",
- "+tX0mWCYbAar9xNf+NBmg0GznFm2tUF3hzryFmlnATZtn5u2Lk9Q9XPDA9lOelQUbtL+4hJReUCveC+C",
- "I5bFxJt2AuRW44ejbSC3ja4keJ8aQoNzNERDgfdwhzCqQgutIj5GaLUUhS2IdeGKZjBgPALGS8ahLssZ",
- "uSDS6JWAG4PntaefSiXVVgQcxNNOgeZofY4xNKWd6eGqQ7VzCRmU4Br9HP3bWNeI6GEcVYNacKN8XVUD",
- "NdQdCBPPsQyxQ2S34gNKVU6IyjCioFUDIsY4DOP2VWaaF0D3GHRlIttdS2pPzi43UV+Q6LTM5qATmmWx",
- "jFTP8CvBrz65FKwgLavElEVBUsyJ0kwS06U2N1EquCqXG+byDa44XVBUJUINYWEXv8MYhDJd47+xrIz9",
- "O+OcMHZ2A/QeF64KxY5yc3OkjtRraDpRbJ4MxwTeKVdHRz315Qi97r9XSs/FvAnINaeG2MTlwj2K8bdv",
- "zcURZk7opBq1V0uV2ACd7oQvhYfPxiokt8mV8Crr5B5FY09VamuzAqK/aNYYL78e19sgIQa196u1HvY5",
- "4Ka9/uJUu8g1TclGFtQbDWS9d2zcD0IR15z2eexYhx3zudN7mGTYkbNx7I0I9a5gXYB+9H6mpKDMmcZr",
- "ZtHFrPNI71cXbjp09Qa3F+H8vHs1dj+e9/lkE8X4PAeC39tlhs7AhbNXdebtWr1Xkn8S2l9dmVc7XuUV",
- "H11/1zsBp7pZNWiv0vbUpbS3y3Rv8h9/sT5sBLiW689AhdvZ9E6Rpq60a9VTdRNSpUMelB65cSvG6y31",
- "5z+qcx4hPRVCsToFd6wQ00Bft1OspRTkb+qO5R1NziHVmHe9NqBLgF2yOZnJgiJ/t3mQet6OlUugS3+0",
- "KedRN9n6lgutE5YUhNbZRNWT4Rl+jio3KWRKmAF3DtzV2WsGHAx2e57NINXsfEsY2P8sgAchRmOvhLD1",
- "coOoMFa50WIWkd1VbDVAm6K0NsITZPO7Mjh9QSBnsL6jSIMaopmzx/5euUwCCcQAcofEkIhQMTcEqzV1",
- "lmGmKspALHi3H9sd6lRcvTV3gqDGS87lSdLcuHWg44Yp40U/Bs1luu4U/oseoX2RYt2iAf3C9gus0aCq",
- "eng+AUX4JCXH3TR9Fy6BBQbtVYYCn8oClP/NR+jaWXJ2BmFVIDTLXFCZ+RZRPYNXYSQb7qNOeJdPeN8G",
- "elbNzGonzW5ATyTxE7riprkw8lfS58/c9IsMi+ej94dN+Y0enwauGUhXPQ2FvVwoSLTwTp2b4NiEClfo",
- "/TJIUL3JFi1wvSlQ3tY5XjDpLMWUJ9R5toQLJBKW1EAng0ws/XNuQvZz+91HsPiko1vVKRW9bk80791z",
- "meogMaT6GXG35fbImMtoVhjntlariqVl4QaVoeq/kCIrU3tBhwej0j4NTnq0gZVElRJpd5Wd92WOKcBe",
- "BnGGZ7A+sKK/T9XvtzKE3opQdg1BXH9rt/eqdIq/r/O5XcB8L3DepOJmPCqEyJMeXf9xN7tM+wycsfQM",
- "MmLuDu/Y1lO2hNxFFXNlzL1YrH02laIADtm9CSFH3LoSe7tuM71xa3J+R2+af4WzZqVN+OR0SpN3PO6T",
- "iamY5BX5mx9mM1dTYJjfFaeyg2zJXbLqyWwj6UWkiM9k6KO0a2ltF1apicpCEZNStpSwiFiRfU0EX2HD",
- "R6xosWRpt4pCR5SYYTWqhEYGP64Y+LhRK5C1Cnf4HEO2TENKrQBnHg+U5aUEFzlgy+a00ukXVC/89pnm",
- "XTHLXNmg0K3fpmSnyj4K/OPE1expnwtRJDmcQ8OQ4MIZyjQFpdg5hPV+bGeSART4VG9fIDENeUhXLR7i",
- "1p4EOtYh2I0yFYtYu1NkC8foKcaeWPJQQ0nIQHTOspI28KeuUIplYG33ENaBJ2TnwxFfXOdouHIpSZXM",
- "LabIdOEkfgsN/dalXVoCUlCCpRqzpxZlhYWrCCO9qI1j9nJJNAbRQ1enHTkyQdmVzZqXMMdO7bwrrWkE",
- "X2r+1LW39FV9GocVgPEdtoAXKuSCEjBeEnLg3LCH7asKKcFSeimhsfxtOj63wJp9BVtkebdZps14Zr2z",
- "mvsSKHDV80ov2leXqa0+xYQ6gmOSsa7aVaGpDHOVh4RjeLc8p/n1q04x09IR4sPVuY0vNNS9hUi2qFSX",
- "c3N7SQfNHejZ9jc1f4Oq3v8Bs0dRG6cbytk8KlnBW4aQZdKc5KKucIdDkgsc0xpFH35Fpi5Ep5CQMsVa",
- "0YsXPo1ypWrCqgJ1+ePNuq1t6/xF6CuQ8cyLL+R1nZJVC7wxagjrI3rDTKXn5EapPEZ9HbKI4C/Go8Jc",
- "GVuui7OGtdSmuG65AQoJe7aaBv5PO1pNu1lAhi7PWgbNpVMq6K5z8G3dwG3koq7XNtTk30XuprydQyz1",
- "8XS8pju6CliEYC5rgqCS3x/+TiTMsFiNIPfv4wT3749d098fNT+b43z/frzM8nU5CVgcuTHcvDGK+aXP",
- "bdy6RvdEKLT2o2R5to0wGvEmdbknjKj4zUWc3UjBqd+sLad7VF3Rj13ck9qbgIiJrLUxeTBVEEkyIIjE",
- "dYuEjKBWJC0l02tMhONV/+y3qDvD95W10Fmbq9QJ7u7T4gyqVEq1bbFU/nb9XtAc7yMjU6NzmMbSut+u",
- "6LLIwR2Ub+5M/xMe/+1J9uDxw/+c/u3B0wcpPHn69YMH9Osn9OHXjx/Co789ffIAHs6++nr6KHv05NH0",
- "yaMnXz39On385OH0yVdf/+cdw4cMyBbQkQ+7Hv0vVmVLjt4cJ6cG2BontGBVRW1Dxr60DE3xJMKSsnx0",
- "6H/6P/6ETVKxrIf3v45cVOdooXWhDg8OLi4uJmGXgzkaExItynRx4OfpVjJ+c1xF5tinJe6oDbrwKgNP",
- "Ckf47e23J6fk6M3xJKiUeTh6MHkweYiFFAvgtGCjw9Fj/AlPzwL3/cAR2+jww8fx6GABNEfbu/ljCVqy",
- "1H9SF3Q+BzlxNXbMT+ePDrwocfDBGVI+mlHnsRRBNsYoCCzplp5xRll01LQxRI1U7splFh9XCf6d1oJn",
- "GPphbROGtVXIOs7qTLbHNaPy+XxsgsPDXyOVB2dsbt7RjYqqrVqt5uX93yc/vSZCEvekeUPTs8pthhzP",
- "bG4GKc4ZRhRkQRiK6TnxNPuvEuS6pinH7cLkfT5fu4vTWKp50XRqriWpWEHxWJkfnNmQQkDMldmzZlZa",
- "lhBCUrNew04fJF+///D0bx9HAwBBG7wCTO3wO83z322FWFihIdMnR3LJL8aR3OQoQY9rMxp2qHdyjF7Z",
- "1dew/EzVphkL9DsXHH7v2wYHWHQfaJ6bhoJDbA/eY/IBJBY8Z48ePNhb3aoq/M36dlejeJK4xEBdJmQ/",
- "RUrR+vJVPXVon+xxoU3n1Ssvtz1cZ9HPaIYlQUBpu5SHX+xSjjm6wZhLgdhL7+N49PQL3ptjbngOzQm2",
- "DDL7dC+an/kZFxfctzQCT7lcUrlGcSaoW9QKraVzhRYVZJH2bDcqlYzef+y99Q7CQgwHHxqeFNmV7sRO",
- "DZrjF1uuyTuqj3N282K26jyY71Uaf7S1u2IWWFhA3ZuQ78PeyL0xzYRN4lBKDpl3hPC3XpU3y2fjqmG7",
- "o8IMHNFLO1AR397fN31/HzUVHI3cizFgGqdgI0wdb6yrXqDdCKpWJb9LVcoLKi5cIm/1Jy0n1Hpf2pne",
- "x55/Wxn1Le56cNcnJgXwVhJTs1LGp2fNPmKhukkaV8YnZNxfuND3iuaGToLltqK5bULSW2HwLyMMVg66",
- "tiCvz8F9NfEQq/EcfPBJZvcgErokuwOEwfBZHfQNkqDebbGTexObMTZsczme4Txyt4p5mPr3VsD7DAS8",
- "blrtGBh1suSbE+oQhkWdd3uXarqNMlk75Qf/QqW4vzCyesU2A+l2ge0S7LMjjDlm/cnY6p9SCHNIuxW/",
- "/tLiVxUncyUBrJEY30VeBWasK2nv2to5pitJrBkrFXC2qgyxO8LjuoiPYTGYV8mn1FBj/zJEE6p9NNrN",
- "GnfejV0R63sIH6jP1scvtklXX5CeZ3B+v8gtEN+bT81Lo2aHt9djdhjGm548eHJ9EIS78Fpo8h3e4p+Y",
- "Q35SlhYnq11Z2CaOdDC1mYc3cSXeYkvIKOqMwgGPwsINYdZi65Fx15XLDDNB3JsQn99YVdUaXP6BuaB5",
- "nWeJyrntZHicQQK54/88xPHvTMh3GBGg1Rgdy7RL5U/uMK4PHz56/MQ1kfTC+m21202/enJ49M03rlmd",
- "zdq+bzrNlZaHC8hz4Tq4u6E7rvlw+L9//8dkMrmzlZ2K1bP1a5s67nPhqeNYPEG18X279YVvUuyV7lL6",
- "bUXdtZjtn4lVlPuL1e3tc2O3j8H+n+LWmTbJyD1AKw1mI95+j7eQPSa73ENjnx3a8J3qMpmQ18KlPilz",
- "KomQGUhX3mZeUkm5BsgmnlIxak3ZVA9pzoBr82DEgh0yUSwDGzE+LyVkJGdLrGgr4Rz94XF6fMs3INjO",
- "6NFr9rNl8q/oKkiHMK2uaS3cklHduaQrXzIIi2IIiT998w15MK5fLXluBkgqxMSY65KuRteo7auIbZCv",
- "eTOr/1ZnXBx7iOaoln5sdTbaTCH+1+bcX6zEbsndbeyeOOfOBp/aoBPqD1yCkY2aAyvY2YJCWOFmXYfz",
- "GinPi1BxFmdmGKoU+IxtA1tV0tHHZxu9t4f49vF/JVbSJqgd2QZGmKqDD2jLCHlG59xihNxfy0wa2Iyk",
- "WHqjkSAz0OnCBee2UB9hT76mQD9v2lRRct9SDe5iN6V1mN8RKx0OTBgSxE2i4Q5khIh/8ul9zWc2sxkq",
- "fL0IXzgVTVLM1xKryoi5YotMeT9+H8NrdnEnKJ/Xk3cFMkTLPuyetwjeDcEd5vitr1mFGHOL+DN4+vun",
- "ZEJeizpE3JVL+DOaHD/lzf6pF/RacLC2dSP5Wlq8NaNWYgeq8BEpPjeIfb9UiasvLYIc+OprG+WQH2zt",
- "s42yyJDb20z2RV7hP0TrGjduGbO2ydbEB/VoQ5izaWhzTDezS9/gK+ZG+Oln+LS5CY51PSwGD6nnM04s",
- "4PtlOphuxxLzQZVYuI8DxXO1D+ZGWlTuZ9H06lPIBZ+rz5MVbaKOOF4iVFJlsY+nqv/rnd3nmMnHPHmt",
- "56PL7aQYT8FWF/QF5l3iNQvh364PQs2WPhcnD2NWb5i7PH3w+PqmPwF5zlIgp7AshKSS5WvyM68qQV6F",
- "22Ei/irXmtcGR2svoLWpmQMsDRMWXZ4JNlzWPugVyz5uZ4ZBxr4d+SDjAR8M8wvSogAqL88At5uuTlsz",
- "Hr8IvYIb+eGr7FkRUAyKdnSM/4/RQL0ThruLmbv8Sm4B9Zm+HJtwLrtiNq6cY4wUIGaH5B2/T9SCPn34",
- "6LdHT7/yfz56+lWP5szM4xL0dHVn9UDmsx1miALti1YH7ldqr/B7eN27vdsmjkcsW0XzRdc1YTpZc51Y",
- "dkeRgq5708wXW2rahMPW9W2uP7Gh0my6iL6v/POnqnp7zJ9Vr2Cbfc+VgrmtZdMTNBHwGUNodVGbCuub",
- "69tskCZbZFkVErnux2kdXGAvOo882bpzblTQ1Tf1SE3wjQrcCzZNtNycTIk5zceBubuqI46+K2VRCKmr",
- "060mg8Q96DPbNaS9PsLdSZhLqU4XZXHwAf+D2bw+1gEHtnJnYOdzv8OqAMmM6IiFe9yvtvL+gbXtb5L+",
- "TmyLK96ULTHbehS0Mqz7dHPO30DMyCuWSnGECfTdJaTWSsOykxPQdf1tU0336IUleM44JEvBY5nqfsKv",
- "r/BjNEW/0DTv63xqPvb1bbHMJvwtsJrzDOGXV8XvZ/JAv5JiqbVaCeZw11XPLP3veAD9oVnztHuS1jzt",
- "Hr5G6bWenw8+NP50nj2upVqUOhMXQV98FloONcSoH2TQHq5Nr15KrUzUimSgDNF+eaqrAA+xE1N9jeQn",
- "C/Kk96Yo+4sqs2aMZy0iQTkzFecgVaXmkN4J51aj9efRaA3e9514rM3HuY2jlWq/EslrkYEdt5kCNxZE",
- "ykUGLm1oVxCpJLO4FsDfSnW71rsspeV8oUlZEC1iL8C6Y0JTy2RtcUa1rZqdbeVrsJwDobkEmq3JFIAT",
- "MTWLblYFJVShA71/Rjr5M16UrYarkCIFpSBLfNDsNtCqZKz46NQb8ISAI8DVLEQJMqPyysCenW+Fs0pg",
- "rsjdH39R924AXisKbkasdduNoLdyDXLSXhfqYdNvIrj25CHZUQnEiwao9RLLIgen94qgcCec9O5fG6LO",
- "Ll4dLagYYp+Y4v0kVyOgCtRPTO9XhbYsEnN/R8pG2q+nbImSGKdcKEgFz1R/cddtbBmLiARrUWYFASeM",
- "cWIcuOfB+ZIq/daZQMJaX0GxEjPFhmq0fYnyzci/VGnyO2On5j7kqlRVLn2n1ojX2+Kw2jDXa1hVc6EN",
- "yo9d6U20IKWCbSP3YSkY3yFLheVldWA8wlIi3cVhphPqFBRdVDaAqBGxCZAT36pRSK42bPQAwlSN6Kpm",
- "ZJNygqJZSouiwFp2Scmrfn1oOrGtj/TPddsucblyRnhvZwJUqNNykF9YzCoM5VhQRRwcZEnPnNpr7jJB",
- "RQp9sSUkaK5ONlG+OZYnplV4BLYe0rKYS5ph2VEaUaX8bD8T+3nTALjjnjyxpnMyhVm0NInZ9JqSZa+K",
- "qBpa4HgqJjxiCWhFUnMEZ1grxxOI671l5Ax66k+fBjUxXXOcK7pFfjxctt3qHrWUGcPsuCUHhNgx9CHw",
- "9qChGvnymMDOSa09aE/xd1BugkqM2H2SNai+JdTj77SAtjYvvL8aF0WLu7cYcJRr9nKxLWyk78TG9Idf",
- "ZKhf25j7CT3VmvrT4P03uczb9uCCMp3MhHRV9elMg4yo8lqFDijTPpLQmlW0cH4UBEdw16Ybx1Vnr9Nx",
- "OCZiQSC+didbRrL7mKm+E3JQOFDT6Y0yTUquWR6ERFcv5c9PX3irA7jVAdzqAG51ALc6gFsdwK0O4FYH",
- "cKsDuNUB3OoAbnUAf1kdwE3F9yVe4PBez1zwhMOcanYOVeDfbUqiP1U8THVVeZ0EajEuKNMuwSehXgzA",
- "L1cLB9RAc8QBy5HHFkL1Zk7CctBKlDIFkhoIGSdFTs3TAFa6SjfXTGTqUyu7gtCYG5UqePyInPxw5N32",
- "F869vNn27pFLUa70Ood7LqFDVbHVZ3YAbpDuEjtQfyX4tHQuSR/LgSiD3m+x9Qs4h1wUIK1HMNGyjGh8",
- "ToHmzx1utih8GgU3zWi/jxt6Joe2JS2Cwve4VqoItbEczXqZM5qr/oKZdrwlLWKZ4aqLz6qCkJs8E9m6",
- "dULMrh3gBjbPRu28zziV60jgTudEdEhDC8OvHGF1dVkf9x5i0iXaLplto7CYtC5BRc/xJiqPxlZUG9YZ",
- "yoYAzVp0Ei0o3Q4oGFUADnGANfTs94S8tf1uNoAdIXJHrGbmn43fYLNlxTSwrXlEONbzpUabe8RHTy+e",
- "/bEh7KxMgTCtiI9S2X69jEerxIw0B544BpRMRbZOGuxr1LiFMqaoUrCcbr+JQv7pciG7y8d82XxP3cw1",
- "8iJY3CaeHBLNKnEMuIc7rzUM5s0VtnBEx54DjH9qFt3HRkMQiONPMaVSuwLNjkyvnmZ9y/huGV9wGlsS",
- "AeMuqq/NRCafkPHJtSx5P8/7dgVpaYALT/Jd1M6jSQ5WumHXzGBazueY07ljozNLAxyPCX5DrNAudygX",
- "3I2C7OBVns+rppZqD9flLkEE210hyVyKsrhni1fxNRozlgXla2/yhUSxZZlbHNp0ePtltDbwrusIgOZY",
- "p/vr02q/8Sq/QHfrrtrm7xYt5IIqYvcXMlLyzEUOdcJzV3x4Pmk79OmK12x6Y0Zpu97I6ty8Q64Iv8su",
- "xKUycxcgE73i9kA1k77bMGB7cie3uWz/GtfGG5uHoYfBdkNaa4awp9tDBnwNr48gcUkdCteswGXrA/YF",
- "joRZTGzLvTqPdIZv+pAE1fmsjRTyglBfaCAVXGlZpvodp2ijCRY26fqXeG10P3977pvEzYQRK54b6h2n",
- "mIe+stxE+dwMImaK7wA8G1XlfA7K8MqQSGYA77hrxTgpuXlpiRlZslSKxIahmjNk5JOJbbmkazKjORoZ",
- "/wApyNTc7MGuW4Wx0izPnUOLmYaI2TtONcmBKk1eMcNlzXA+w1jlyQX6QsizCgvxpBZz4KCYSuLKl+/t",
- "V8wb4ZbvlXyosLSf63jv600Y4WFnWS/kxy8M3BRT5ORM6doHogP7tdm/l4wnUSI7XQBxLmFt2iJ3MYOM",
- "I6B7TeuQXsA7bm44LQhydaovRw5tM0/nLNrT0aKaxka0rEF+rYOeeHvhMiTCZG5NK3+iwMyADrz5Ejce",
- "K9S0935HM8rGopexry7PWE8j90hoKMKaF/eJa3HaAPnPm6P+/b60ZsNtGgtAnlnp+EPOqgWp9ujTas42",
- "upTUOeWWS8gY1ZCvSSEhBczkgr429YNzYmP2SbqgfI73jhTl3JWDtuNcgIQq/ZZ547WHiGf8WPEEBd2I",
- "48ARsco6T17m6GHBwxCd2NlyZ/Oo9Ki1CRWGPBsjx+F7M2bfK3I86pUSDVLPa98ui5zmGYlj4UKUeZao",
- "Mk0h5qFxHPWaqpbaFOBwLCu7uQEhI1kpbWVsQlNdYg2aKebFFLbkFOXr5vWPtbuEDPJnEkoUm3OqSwlj",
- "uzb0FJsCQYPjJOI+07rcG9d0sPM1Stuo2Ed5gdtjcHsM/nzHoHPpvHVEMmupEixlhAT4pyoMUWeOO3Il",
- "Ntq50f6sRSL61clus4W8TtXZ9Zdi+JTPi0+9mk/1WvFsXBGK9X5DTtDKpmgZA1WEacctp0DgnOYlclOX",
- "L9694SfktOa1lZd4qVwa03RBGXfZa6p4BoRDu1TL2ud23JdCk17oFXf6TMs2UZFpsAFpKZle40OGFuy3",
- "MzD/f29eArb+qH3jlDIfHY4WWheHBwdYsn8hlD4YfRyH31Tr4/sK/A/+eVJIdo4VfN5//P8BAAD//6G6",
- "O0DCSQEA",
+ "H4sIAAAAAAAC/+x9a5PbtpLoX0Fpt8qPFTV+Zk+mKrV3YucxG8dx2ZOcPRv7JhDZknCGAngAUCPF1//9",
+ "FhoACZKgRM1oxnYyn+wRSaDRaDT63e9HqVgWggPXanT8flRQSZegQeJfNE1FyXXCMvNXBiqVrNBM8NGx",
+ "f0aUlozPR+MRM78WVC9G4xGnS6jfMd+PRxL+VTIJ2ehYyxLGI5UuYEnNwHpTmLerkdbJXCRuiBM7xOnz",
+ "0YctD2iWSVCqC+VPPN8QxtO8zIBoSbmiqXmkyAXTC6IXTBH3MWGcCA5EzIheNF4mMwZ5piZ+kf8qQW6C",
+ "VbrJ+5f0oQYxkSKHLpzPxHLKOHiooAKq2hCiBclghi8tqCZmBgOrf1ELooDKdEFmQu4A1QIRwgu8XI6O",
+ "fx0p4BlI3K0U2Ar/O5MAf0CiqZyDHr0bxxY30yATzZaRpZ067EtQZa4VwXdxjXO2Ak7MVxPyY6k0mQKh",
+ "nLz+9hl5/Pjxl2YhS6o1ZI7IeldVzx6uyX4+Oh5lVIN/3KU1ms+FpDxLqvdff/sM53/jFjj0LaoUxA/L",
+ "iXlCTp/3LcB/GCEhxjXMcR8a1G++iByK+ucpzISEgXtiXz7opoTzf9RdSalOF4VgXEf2heBTYh9HeVjw",
+ "+TYeVgHQeL8wmJJm0F8fJF++e/9w/PDBh3/79ST5X/fn08cfBi7/WTXuDgxEX0xLKYGnm2QugeJpWVDe",
+ "xcdrRw9qIco8Iwu6ws2nS2T17ltivrWsc0Xz0tAJS6U4yedCEerIKIMZLXNN/MSk5LlhU2Y0R+2EKVJI",
+ "sWIZZGPDfS8WLF2QlCo7BL5HLlieGxosFWR9tBZf3ZbD9CFEiYHrUvjABX26yKjXtQMTsEZukKS5UJBo",
+ "seN68jcO5RkJL5T6rlL7XVbkbAEEJzcP7GWLuOOGpvN8QzTua0aoIpT4q2lM2IxsREkucHNydo7fu9UY",
+ "rC2JQRpuTuMeNYe3D30dZESQNxUiB8oRef7cdVHGZ2xeSlDkYgF64e48CaoQXAER039Cqs22//ebn14S",
+ "IcmPoBSdwyuanhPgqcggm5DTGeFCB6ThaAlxaL7sW4eDK3bJ/1MJQxNLNS9oeh6/0XO2ZJFV/UjXbFku",
+ "CS+XU5BmS/0VogWRoEvJ+wCyI+4gxSVddyc9kyVPcf/raRuynKE2poqcbhBhS7r+6sHYgaMIzXNSAM8Y",
+ "nxO95r1ynJl7N3iJFCXPBog52uxpcLGqAlI2Y5CRapQtkLhpdsHD+H7w1MJXAI4fpBecapYd4HBYR2jG",
+ "nG7zhBR0DgHJTMjPjrnhUy3OgVeETqYbfFRIWDFRquqjHhhx6u0SOBcakkLCjEVo7I1Dh2Ew9h3HgZdO",
+ "BkoF15RxyAxzRqCFBsusemEKJtyu73Rv8SlV8MWTvju+fjpw92eivetbd3zQbuNLiT2SkavTPHUHNi5Z",
+ "Nb4foB+Gcys2T+zPnY1k8zNz28xYjjfRP83+eTSUCplAAxH+blJszqkuJRy/5ffNXyQhbzTlGZWZ+WVp",
+ "f/qxzDV7w+bmp9z+9ELMWfqGzXuQWcEaVbjws6X9x4wXZ8d6HdUrXghxXhbhgtKG4jrdkNPnfZtsx9yX",
+ "ME8qbTdUPM7WXhnZ9wu9rjayB8he3BXUvHgOGwkGWprO8J/1DOmJzuQf5p+iyM3XupjFUGvo2F3JaD5w",
+ "ZoWToshZSg0SX7vH5qlhAmAVCVq/cYQX6vH7AMRCigKkZnZQWhRJLlKaJ0pTjSP9u4TZ6Hj0b0e1/eXI",
+ "fq6OgslfmK/e4EdGZLViUEKLYo8xXhnRR21hFoZB4yNkE5btodDEuN1EQ0rMsOAcVpTrSa2yNPhBdYB/",
+ "dTPV+LbSjsV3SwXrRTixL05BWQnYvnhHkQD1BNFKEK0okM5zMa1+uHtSFDUG8flJUVh8oPQIDAUzWDOl",
+ "1T1cPq1PUjjP6fMJ+S4cG0VxwfONuRysqGHuhpm7tdwtVtmW3BrqEe8ogtsp5MRsjUeDEfMPQXGoVixE",
+ "bqSenbRiXv7evRuSmfl90MefB4mFuO0nLlS0HOasjoO/BMrN3RbldAnHmXsm5KT97eXIxowSJ5hL0crW",
+ "/bTjbsFjhcILSQsLoHti71LGUUmzL1lYr8hNBzK6KMzBGQ5oDaG69FnbeR6ikCAptGD4Ohfp+fdULQ5w",
+ "5qd+rO7xw2nIAmgGkiyoWkxGMSkjPF71aEOOmHkRFXwyDaaaVEs81PJ2LC2jmgZLc/DGxRKLevwOmR7I",
+ "iO7yE/6H5sQ8NmfbsH477IScIQNT9jg7J0NmtH2rINiZzAtohRBkaRV8YrTuvaB8Vk8e36dBe/SNtSm4",
+ "HXKLwB0S64Mfg6/FOgbD12LdOQJiDeoQ9GHGQTFSw1INgO+5g0zg/jv0USnppotkHHsIks0Cjeiq8DTw",
+ "8MY3s9TG2ZOpkJfjPi22wkltcibUjBow33ELSfhqWSSOFCNmK/tCa6Day7edabSHj2GsgYU3ml4DFpQZ",
+ "9RBYaA50aCyIZcFyOADpL6JMf0oVPH5E3nx/8vTho98ePf3CkGQhxVzSJZluNChy1+lmROlNDve6K0Pt",
+ "qMx1fPQvnnhDZXPc2DhKlDKFJS26Q1kDqBWB7GvEvNfFWhPNuOoKwCGH8wwMJ7doJ9a2b0B7zpSRsJbT",
+ "g2xGH8KyepaMOEgy2ElM+y6vnmYTLlFuZHkIVRakFDJiX8MjpkUq8mQFUjER8aa8cm8Q94YXb4v27xZa",
+ "ckEVMXOj6bfkKFBEKEuv+XC+b4c+W/MaN1s5v11vZHVu3iH70kS+tyQqUoBM9JqTDKblvKEJzaRYEkoy",
+ "/BDv6O9AoyhwxpbwRtNl8dNsdhhVUeBAEZWNLUGZmYh9w8j1ClLBbSTEDu3MjToEPW3EeBOd7gfAYeTN",
+ "hqdoZzzEse1XXJeMo9NDbXgaaLEGxhyyeYMsr66t9qHDTnVHRcAx6HiBj9HQ8RxyTb8V8qy2BH4nRVkc",
+ "XMhrzzl0OdQtxplSMvOt16EZn+fN6Ju5gX0SW+NHWdAzf3zdGhB6pMgXbL7QgVrxSgoxOzyMsVligOID",
+ "q5Tl5puuavZSZIaZ6FIdQASrB6s5nKHbkK/RqSg1oYSLDHDzSxUXznriNdBRjP5tHcp7emH1rCkY6kpp",
+ "aVZbFgS9t537ov4woak9oQmiRvX4riqno33LTmdjAXIJNNuQKQAnYuocRM51hYuk6HrWXrxxomGEXzTg",
+ "KqRIQSnIEmeY2gmaf89eHXoLnhBwBLiahShBZlReGdjz1U44z2GTYKCEInd/+EXd+wjwaqFpvgOx+E4M",
+ "vZWa77yAXaiHTb+N4NqTh2RHJRB/rxAtUJrNQUMfCvfCSe/+tSHq7OLV0bICif64a6V4P8nVCKgC9Zrp",
+ "/arQlkVP+J9Tb42EZzaMUy68YBUbLKdKJ7vYsnmpoYObFQScMMaJceAewesFVdr6kBnP0PRlrxOcxwph",
+ "Zop+gHvVEDPyL14D6Y6dmnuQq1JV6ogqi0JIDVlsDRzWW+Z6CetqLjELxq50Hi1IqWDXyH1YCsZ3yLIr",
+ "sQiiunK1uCCL7uLQIWHu+U0UlQ0gakRsA+SNfyvAbhgC1QMIUzWiLeEw1aKcKu5qPFJaFIXhFjopefVd",
+ "H5re2LdP9M/1u13iorq+tzMBCiOv3PsO8guLWRv8tqCKODjIkp4b2QPNINbZ3YXZHMZEMZ5Cso3yUcUz",
+ "b4VHYOchLYu5pBkkGeR00x30Z/uY2MfbBsAdr9VdoSGxUUzxTa8p2QeNbBla4HgqJjwSfEJScwSNKlAT",
+ "iPt6x8gZ4Ngx5uTo6E41FM4V3SI/Hi7bbnVkRLwNV0KbHXf0gCA7jj4E4B48VENfHhX4cVLrnu0p/gHK",
+ "TVDJEftPsgHVt4R6/L0W0GNDdQHiwXlpsfcWB46yzV42toOP9B3ZHoPuKyo1S1mBus4PsDm46teeIOpm",
+ "JBloynLISPDAqoFF+D2x8TftMS+nCg6yvXXB7xjfIsvJmUKRpwn8OWxQ535lAzsDU8chdNnIqOZ+opwg",
+ "oD5czIjg4SuwpqnON0ZQ0wvYkAuQQFQ5XTKtbcB2U9XVokjCAaJ+jS0zOieeDYr0OzDEq/gGhwqW192K",
+ "8cjqBNvhO2spBg10OF2gECIfYCHrICMKwaB4D1IIs+vMxY776GFPSQ0gHdNGD251/d9RDTTjCsg/RElS",
+ "ylHlKjVUMo2QKCigAGlmMCJYNaeL7KgxBDkswWqS+OT+/fbC7993e84UmcGFT7gwL7bRcf8+2nFeCaUb",
+ "h+sA9lBz3E4j1wc6fMzF57SQNk/ZHVngRh6yk69ag1deInOmlHKEa5Z/ZQbQOpnrIWsPaWRYVAWOO8iX",
+ "EwwdWzfu+xu2LHOqD+G1ghXNE7ECKVkGOzm5m5gJ/s2K5j9Vn+3Q6eooMLZcQsaohnxDCgkp2Oh8I6qp",
+ "auwJsXF76YLyOUroUpRzFzhmx0EOWyprC5El7wwRlWL0midoVY5xXBcs7BM0jPwC1OhQbZO01RguaDWf",
+ "y8kZchX6nYuY6KNeqfGoV8U0SF3VKqZFTjPLZAD3bQhYAX7qiQf6LhB1Rtjo4ivcFkO9ZnOvx0ZeDx2D",
+ "sjtxEMpWP+yLZjP6bb45gJRhByISCgkK74TQLqTsUzELM8rcpaE2SsOyazq3n/7Wc/xe9ypogueMQ7IU",
+ "HDbRJGrG4Ud8GD1OeC/1fIwSQt+3baG/AX8LrOY8Q6jxqvjF3W6f0LaLSH0r5KF8kHbAwfL0AJffTv+2",
+ "m/Kyjkma5xFfnss3aTMANa7y25kkVCmRMhSSTjM1tgfNuf9cckoT/a+qKNoDnL32uC2nVZjKiEZZyAtC",
+ "SZozNNkKrrQsU/2WUzQKBUuNRBt57bffTPjMvxK3S0bMhm6ot5xipFllKopGSMwgYhf5FsBbC1U5n4PS",
+ "LeViBvCWu7cYJyVnGudamuOS2PNSgMSQn4l9c0k3ZGZoQgvyB0hBpqVuituYTqU0y3PnQTPTEDF7y6km",
+ "OVClyY+Mn61xOO9l90eWg74Q8rzCQvx2nwMHxVQSj4r6zj7FgFW3/IULXsX0d/vY+lzM+HXO1QZtRnVK",
+ "9/+9+1/Hv54k/0uTPx4kX/7H0bv3Tz7cu9/58dGHr776f82fHn/46t5//XtspzzssWQfB/npc6eKnj5H",
+ "faN2unRgvzGD+5LxJEpkYfhEi7bIXUxsdQR0r2mN0gt4y/WaG0Ja0ZxlhrdchhzaN0znLNrT0aKaxka0",
+ "rE9+rXtK8VfgMiTCZFqs8dJSVDeQMJ5Wh15AlymH52VWcruVXvq2WSM+oEvMxlXqpK2qckwwr25BfTSi",
+ "+/PR0y9G4zofrno+Go/c03cRSmbZOpb1mME6ppy5A4IH444iBd0o0HHugbBHY9dsMEU47BKMVq8WrLh5",
+ "TqE0m8Y5nI/Fd0aeNT/lNkjenB/0KW6cq0LMbh5uLQEyKPQiVm2hIajhW/VuArTiPAopVsDHhE1g0jay",
+ "ZEZfdFF0OdAZZv2j9imGaEPVObCE5qkiwHq4kEGWjBj9oMjjuPWH8chd/urg6pAbOAZXe87Kgej/1oLc",
+ "+e6bM3LkGKa6YxNw7dBBymRElXZZQY0IIMPNbI0ZK+S95W/5c5gxzszz47c8o5oeTaliqToqFcivaU55",
+ "CpO5IMc+0eg51fQt70havWWgghQvUpTTnKXkPFRIavK0pT26I7x9+yvN5+Lt23edYIiu+uCmivIXO0Fi",
+ "BGFR6sQVJkgkXFAZczapKjEdR7aVR7bNaoVsUVqLpC984MaP8zxaFKqdoNpdflHkZvkBGSqXfmm2jCgt",
+ "pJdFjIBiocH9fSncxSDphberlAoU+X1Ji18Z1+9I8rZ88OAxkEbG5u/uyjc0uSlgsHWlN4G2bVTBhVu1",
+ "EtZa0qSg85hP6+3bXzXQAncf5eUl2jjynOBnjUxRHwmPQ9UL8Pjo3wALx95Zb7i4N/YrX4QqvgR8hFuI",
+ "7xhxo/a0X3a/gtzRS29XK/+0s0ulXiTmbEdXpQyJ+52patPMjZDlwx8Um6O26sr4TIGkC0jPXX0VWBZ6",
+ "M2587iNsnKDpWQdTtvKOzfzC2g/oEZgCKYuMOlGc8k07CV+B1j6O9zWcw+ZM1KUj9sm6byaBq76DipQa",
+ "SJeGWMNj68Zob74L40LFvih8LjUm1XmyOK7own/Tf5CtyHuAQxwjikaSch8iqIwgwhJ/DwousVAz3pVI",
+ "P7Y8o2VM7c0XqcLjeT9xr9TKk4u4CleDVnf7fAlYxktcKDKlRm4XrgKVTXQOuFip6Bx6JOTQKTMwnbjh",
+ "yMFBdt170ZtOzNoXWue+iYJsX07MmqOUAuaJIRVUZlpxdn4m6/dzngksLOkQNs1RTKoCEi3TobLhHLOV",
+ "8vpAixMwSF4LHB6MJkZCyWZBlS+OhTXE/FkeJANcY+L+tnItp0GIWFAorCrG4nlu+5x2tEtXtMVXavHl",
+ "WULVckCpFSPhY1R6bDsERwEogxzmduH2ZU8odRGBeoMMHD/NZjnjQJJYtFlgBg2uGTcHGPn4PiHWAk8G",
+ "jxAj4wBs9GfjwOSlCM8mn+8DJHdFEKgfGz3hwd8Qz9ey8ddG5BGFYeGsx6uVeg5AXYhidX+1AmVxGML4",
+ "mBg2t6K5YXNO46sH6VQNQbG1VSPERVTc6xNntzhA7MWy15rsVXSZ1YQykwc6LtBtgXgq1olN2IxKvNP1",
+ "1NB7NCQd00djB9PWZ7mjyFSsMUoHrxYbAr0Dln44PBiBhr9mCukVv+u7zS0w26bdLk3FqFAhyThzXkUu",
+ "feLEkKl7JJg+crkblFy5FAAtY0ddv9gpvzuV1KZ40r3M61ttXJcS89k+sePfd4Siu9SDv64VpiqS8qot",
+ "sUTtFM1gk2Z9mECEjBG9YRNdJ03XFaQgB1QKkoYQlZzHPKdGtwG8cd74zwLjBVahoXxzL4hgkjBnSkNt",
+ "RPdxEh/DPEmx+J0Qs/7V6ULOzPpeC1FdU9aNiB82lnnjK8AQ4BmTSifogYguwbz0rUKl+lvzalxWasZI",
+ "2VKxLIvzBpz2HDZJxvIyTq9u3h+em2lfVixRlVPkt4zbgJUpljaORk5umdoG125d8Au74Bf0YOsddhrM",
+ "q2ZiacilOcdnci5anHcbO4gQYIw4urvWi9ItDDLIeO1yx0BuCnz8k23W185hyvzYO6N2fN5t3x1lR4qu",
+ "JTAYbF0FQzeREUuYDioDd1NRe84ALQqWrVu2UDtqr8ZM9zJ4+HpqLSzg7rrBdmAgsHvGsmEkqGbpvFrA",
+ "tzWeG5VrJoMwc9YscBcyhHAqpnyHgi6iqmy5Xbg6A5r/AJtfzLu4nNGH8ehqptMYrt2IO3D9qtreKJ7R",
+ "NW9NaQ1PyJ4op0UhxYrmiTMw95GmFCtHmvi6t0ffMKuLmzHPvjl58cqB/2E8SnOgMqlEhd5V4XvFZ7Mq",
+ "W6Wv54D4CuhG5/MyuxUlg82vSouFRumLBbhS0oE02ql5WTscgqPojNSzeITQTpOz843YJW7xkUBRuUhq",
+ "8531kDS9InRFWe7tZh7anmgeXNywwqlRrhAOcGXvSuAkSw7KbjqnO346aurawZPCubYUu17aeu6KCN52",
+ "oWPM86ZwXvclxYqV1irSZU68XKIlIVE5S+M2Vj5Vhji49Z2Zlwm+3COMmhFL1uOK5SULxjKvDalJ0wIy",
+ "mCOKTBUti1Pjbipcr56Ss3+VQFgGXJtHEk9l66BieRNnbe9ep0Z26M7lBrYW+nr4q8gYYbXW9o2HQGwX",
+ "MEJPXQfc55XK7BdaWaTMD4FLYg+Hfzhj50rc4qx39OGo2QYvLpoet7C1Tpf/GcKwNdZ39/XxyqsrG9sz",
+ "R7RPD1PJTIo/IK7noXocSTTy9WkZRrn8AWGiQ9idosFiKutO3W6onr13u/ukm9AK1QxS6KF63PnALYeF",
+ "Mr2FmnK71bZtRiPWLU4wYVTpkR2/JhgHcycSN6cXUxqrImqEDAPTSe0AbtjStSD+Y497VWVb2NlJ4Euu",
+ "3mU2ibwAWecAdgvSXFJgsNMOFhVqyQCpNpQJxtb/lysRGabkF5Tb7ivmO3uU3NcKrPHLfHUhJJaAUHGz",
+ "fwYpW9I8LjlkadfEm7E5s41FSgVB5wo3kG3aZKnIdf+ocogcak5n5ME4aJ/jdiNjK6bYNAd846F9Y0oV",
+ "cvLKEFV9YpYHXC8Uvv5owOuLkmcSMr1QFrFKkEqoQ/Wmcl5NQV8AcPIA33v4JbmLbjvFVnDPYNHdz6Pj",
+ "h1+i0dX+8SB2AbjGMNu4SYbs5O+OncTpGP2WdgzDuN2ok2i2vO0M18+4tpwm++mQs4RvOl63+ywtKadz",
+ "iEeKLHfAZL/F3URDWgsvPLNtjZSWYkOYjs8Pmhr+1BN9btifBYOkYrlkeumcO0osDT3VbSnspH442yPJ",
+ "VRT2cPmH6CMtvIuopUTerNHU3m+xVaMn+yVdQhOtY0Jt3Y+c1dELvs45OfVlhbDEclVZ2eLGzGWWjmIO",
+ "BjPMSCEZ16hYlHqW/I2kCyppatjfpA/cZPrFk0hZ6WZ5U74f4DeOdwkK5CqOetlD9l6GcN+Su1zwZGk4",
+ "SnavzvYITmWvMzfutuvzHW4feqhQZkZJesmtbJAbDTj1lQiPbxnwiqRYrWcvetx7ZTdOmaWMkwctzQ79",
+ "/PqFkzKWQsZqBdbH3UkcErRksMLYvfgmmTGvuBcyH7QLV4H+43oevMgZiGX+LMcUga9FRDv1pc4rS7qL",
+ "VY9YB/qOqXlgyGDqhhqTZlnpm+ejh4mCinu6vGG769gyTzwe8I82Ij4yueAG1r58u5IeQgnK6kdJJque",
+ "Bz52Sr4W66GE0zqFnng+ARRFUVKyPPulzvxsdS2QlKeLqM9saj78re6vVi3O3oHRsn8Lyjnk0eGsvPmb",
+ "l0sjkvM/xdB5lowPfLfdSMEut7W4GvAmmB4oP6FBL9O5mSDEajOprgrazuciIzhPXWOuPq7dBhxBmfR/",
+ "laB0LEEJH9jAMbSNGnZgq3QT4BlqpBPynW2hvADSKCCEmqCvFNHMmi6LXNBsjBUszr45eUHsrPYb2yXI",
+ "VgmfoyLUXEXLJhaUzxwWguwb/sTTI4aPsz1e26xa6aQq6h1LQDVv1GXHWctPgCpSiJ0JeR40Q7W5qmYI",
+ "Qw8zJpdGq6tGs/IR0oT5j9Y0XaDa12Ct/SQ/vLy9p0oVtJSsWkNVNSXx3Bm4XYV7W+B+TITRzS+Ysp1z",
+ "YQXNnNcqAdyZHXwObHN5suTcUspkj1uuqiC5L9o9cPaK9K6EKGQtxO8p9NvuEPtW+3+DX0VLXLVbB3R6",
+ "SdoMyqrlj++InlIuOEuxwFTsinYtdof42QbU4mobcv0Rdyc0criiDQuqUDyHxd4WBp4ROsR1Df3BU7Op",
+ "ljrsnxp7uS6oJnPQynE2yMa+74azNTKuwNUIxYbMAZ8UsuG7RA4ZdYcnldtkTzLC1Jse5fFb8+ylMy1g",
+ "TPo546hEOLQ5wc9aA7EDqDaaB9NkLkC59TTzj9Wv5psJpuJmsH438R1DcQzr+jPLtn7u7lAn3uvtvMzm",
+ "3WfmXVcgqfq5EeVsJz0pCjdpf1eWqDyg17wXwRHvZeLdRwFyq/HD0baQ29ZwFbxPDaHBCp3dUOA93CGM",
+ "qkNJq/uVEVotReEbxIaJRaskMB4B4wXjUPezjVwQafRKwI3B89rznUol1VYEHMTTzoDm6OGOMTSlnXvj",
+ "qkO1y0MZlOAa/Rz921g3V+lhHNULteBG+aZqo2uoOxAmnmH/bofIbqsUlKqcEJVh1kKreUqMcRjG7dsz",
+ "NS+A7jHoykT2cy2pPTn73ER9iajTMpuDTmiWxUq2fo1PCT4lWYmSA6whLavSnkVBUqy70ixE06U2N1Eq",
+ "uCqXW+byL1xxuqAbUYQawo5Ifocx0WW6wX9jdS37d8YFeuwdauijOrL9qi91QydjUq+h6USxeTIcE3in",
+ "XB0d9dSXI/T6+4NSei7mTUBuuPzENi4X7lGMv31jLo6wOkOnWKu9WqriCRjYJ3wPSVQbq7TfJlfCq6xT",
+ "vRUdSlWPuu0GiP5uc2O8/HrCe4OiG9Ter9ZD2Rfkm/bGpFPtsuM0JVtZUG/GkY0QsrlFCEXcOtsXFWSD",
+ "gszjztfDJMOOnK3jhQ8DhPpwsy5AP/hYVlJQ5tzvNbPoYtZFvXfzEIbEw9Yb3F6EiyXvtdj9sOqL+/bF",
+ "2PB5uxvVObiU+ULCionSO7Z95JNXCe2vjd5OVeR9dP1dwytO9XHNob3G2zPXFcAu0+nkP/xi4+QIcC03",
+ "n4Apt7PpnT5XXWnXmqfqV0hVUHpQgenGrTikUGGsJp6TDRudtnb0CeuQ1fMh4kC379d4dJrtdWHG6iqO",
+ "7CixYxfv4tVfdqouNYVHrBCK1XXdY+29BoYYnmGHrqBsVncsH9+zglRjMf86bkEC7FNEy0wWNAy9LT/V",
+ "o05XkZiu6tS2UlPdCv477vhONliQ0Wirn0+GF1Y6qaLTkE9jNeQ5cNezs5nnMTjafDaDVLPVjuy7vy+A",
+ "B5ldY2+Xsb23g2Q8VkUvY/GW/a2ONUDbkuO2whMUUbwyOH25N+ewuaNIgxqi5djH/qq9TN0OxAByh8SQ",
+ "iFCx6A9rSHYOeaYqykAs+Ggr+znUFdB6OzkFuaSXnMuTpLk46vzSLVPGW8kMmst8ulfWNQbi9iXodTtR",
+ "9Osfz7Hxh6q6LPq6H6GWTk671REvXN0QzJWsfCe+gggo/5tPjLaz5Owcwl5T6Km6oDLzb0RNL96qk2y5",
+ "jzpZdb6LQhvoWTUzq2Nju3lUkXpbGAGd5sKIEUlfGHkzHLWK5bijbNCNLf+OgbYGrhlI15MP5d9cKEi0",
+ "8LG02+DYhgobWXQpJKjeGpcWuN7KM6/r0jpY65dipRnqAorCBRIJS2qgk0EBnP45tyH7mX3uE4d8rded",
+ "FqaKXnc3HfBR0Ux1kBhS/Yy423J3QtJljE2Mc9v3WcWq4XCQTW9IIUVWpvaCDg9GZZAbXGtqCyuJ2mnS",
+ "7ipbOkKQ1XkOmyOrBPluDX4HQ6Ct5GRBD6ootDb5oOY3FYN7fhDwPqblajwqhMiTHmfHabeET5viz1l6",
+ "DhkxN4WPHuzpfEPuoo298mZfLDa+ZE1RAIfs3oSQE27jtb1ju1lDujU5v6O3zb/GWbPSVtVyRrXJWx4P",
+ "fMV6V/KK3MwPs52HKTCs7opT2UF2FIhZ95QPkvQi0gdqMlQr77qa2715aqKyUMRkkrrtzI44mSpEpu78",
+ "UYfJdKWDPBcXCVJRUtX/iukc5r0mk/QVT+vPDLanEMTbUOUu0A1Z0IykQkpIwy/iKQ4WqKWQkOQCw29i",
+ "nsGZNvLQEuOaOcnFnIjCqLm2jJ73oUTb0gRz2TRb+2ViHTU9hQxAubRaN419uTvPlu41+3fGOVtE7C2I",
+ "aI/lvdvfOELZu2tFAOYAAt1tazqJdfdprqvdH6qvW5sWS5bG0f15RZn0xobs6F0UWV9Fjq61ks8K7MFV",
+ "1GW73UNq+9BNh/pJq5rJA49FAEC/57QBwyD/6b5gzLCvY0IjSD6tpNZxo+0ua519X8/O0nhKrda6AGLG",
+ "LiW4LDXbgK7VOaegeuFvMfN6V7c0egooTCGz7T+ospYQb5Fx3e/a4oEokhxW0HAou9S5Mk1BKbaCsHOe",
+ "/ZhkAAXaJ9tSc8xTGnK5lijl1p4EvrYh2I3KVhaxdqfIDsEpKuateWKPiRp6lAxEK5aVtIE/dYVeZH1t",
+ "yCJs2MM6kFPszSTii9vGInbGNiDNR88lj4c2hJmblVEEZ8sq46klwvpkq4Je8H4lImJ3qvztV18HwcGI",
+ "amVS9175stqVyyqQvZSxjTA6/QOjMocC3/81LHrixS33bUTGsqYupiIDMFWfZ4zegzo6LHhtSTckY7MZ",
+ "SGvMV5ryjMosfJ1xkoLUlBnNZqMuL9YaaGUJ452SreGuOKhnMDEZF+1SFpB841SGK0id6LmJSJz2qtWi",
+ "r0ViZ1fi6QR0baRrjKvqIQKXCI2ytT1ggqOARJb0HPacR7E/YPs0WJ7E2f60wFmHTBHztV6yttog1t0N",
+ "Q4jcbkEzxO2eobD0Yp3TJW00C1qS/QXZpvEf64tzWFtG/8EO8EKHYdCY0dtuHDgfOTnqxwopwVLe9VFC",
+ "Y/m7fJBugbWkEWyRYwRagy2EawPqm/sSOJjVs8pv29dDtO3exTqLgtsmfx23sOVNtmtfQDjmLMgVzW/e",
+ "tYsFOE8QH5C97jcGh77BEMkWlepymQkv6KC5Az/g4abmr9AV/XcwexTVSt1QToSpxHofzIM3C82t4WLm",
+ "W3itgJMLHNPGsT38gkxd5nYhIWWqLRpd+O4alSsMm025bJC13uF727XOX4S+AhnPvKZBXtaV+lHHn/Ma",
+ "wvqIfmSm0nNyo1Qeo74OWUTwF+NRYQm1HdfFeSPAzXY+aWVuCAkHDnQLQtb3DHTrFocbujwbzGUunVJB",
+ "d52Db+sGbiMXdb22oVGaXeRuK+c+JLgy3qXBfI7RnRYh2OKEIKjk94e/Ewkz7GEoyP37OMH9+2P36u+P",
+ "mo/Ncb5/Pyqd3Vhcp8WRG8PNG6OYX/oy/Ww2W09SaWs/SpZnuwijkSJcdwHFJNjfXCGCj9KH9Dcba9I9",
+ "qq4X3BUC5CxiImttTB5MFST/Dsj7dZ9FsnzRj5OWkukN1kf09gP2WzQC9bsqmslFw1X6obv7tDiHqsJm",
+ "HftUKn+7fidojveRVVu5uYVEPiHfrOmyyMEdlK/uTP8THv/tSfbg8cP/nP7twdMHKTx5+uWDB/TLJ/Th",
+ "l48fwqO/PX3yAB7Ovvhy+ih79OTR9MmjJ188/TJ9/OTh9MkXX/7nHcOHDMgW0JGvxjP6H2zWm5y8Ok3O",
+ "DLA1TmjBfoCN7QtoyNh3HKQpnkRYUpaPjv1P/8efsEkqlvXw/teRK/YxWmhdqOOjo4uLi0n4ydEcgx0S",
+ "Lcp0ceTn6bQkPHl1WnmJrBUId9TmyXrrnieFE3z2+ps3Z+Tk1ekk6Fd/PHoweTB5iO3NC+C0YKPj0WP8",
+ "CU/PAvf9yBHb6Pj9h/HoaAE0x9hA88cStGSpfySBZhv3f3VB53OQE9eG0fy0enTkxYqj9y7o44OZIapP",
+ "2xTxIC+4253QBZChMcqmgDe6/SjXfGZc9YByxkaeYeaujaMwbK5C3GlWNzs4rZmWL/loa2Af/xoJxJ2x",
+ "OZoefCXCRodI1yCOKfLfb356SYQkTr15RdPzym9BTme2fJcUK4YJoVmQRWy+nHj6/VcJclPTl+N8YX1n",
+ "39LHOUCWal40c9JqqSrmo4l1gsSZDVkEhF2FaNWMC000Yf/cig0b1vog+fLd+6d/+zAaAAjGCyrA6l+/",
+ "0zz/nVwwbCiI9kVfP9PVRxtH2tegND2uQ37wg3onx5hUVz0NOxRW7zRTuX/ngsPvfdvgAIvuA81z86Lg",
+ "ENuDd1ifCokFz9yjBw8O1tq0ql5gvTTVKJ4kLjFQlyHZR1WL1AtJC3sWfYdTdHSjKuwXig1dnxxwoc3c",
+ "oysvtz1cZ9Ff0wy7xoHSdikPP9ulnHIM2TUXBLEX4Ifx6OlnvDen3PAcmhN8Myj+2L1ofubnXFxw/6YR",
+ "fsrlksoNijZBa8tWZRQ6VxgPgizSnu1GM7vRuw+9t95R2Kvr6H0j6jO70p3YaVN4+nzHNXlH9XHObun0",
+ "Visw87zq9IRxga7fGfaeUvcm5Lvwa+TeWInM1vkqJYfMB236W68qreoLttaw3VFhkbbopR2Yi2/v7499",
+ "f580jR2N8twxYBqnYCtMHa/CVS/Qrqu01ez5Us2Ug6Zcl2htcq0dJ1u6pp3pXUwV3Mmob3HXg7s+MSmA",
+ "t5KYms3Urp81+yTB6iZpXBnXyLg/c6HvR5obOgmW2yrGY2vW3wqDfxlhsEommlvpzLVpuZp4iA0bj977",
+ "PgQHEAldH4YBwmCoVgffBlEfd1vs5N7ENhUI37kcz3DZQzvFPOwOcSvgfQICXrfzSgyMup/GxxPqEIZF",
+ "3ZplZxcY31Sl3Ut/rxYyn6kU9xdGVq/YZiDdLbBdgn12hDHHrK+Nrf4phTCHtFvx6y8tflU5vVcSwBq9",
+ "k1yWeODGupL1rm2dY7qSxJp53QFnw5Bqw1DcER7XfR4Ni8GymL4imhp7zRDdqVZptJs17uiNXRHrOwgV",
+ "1K83p893SVefkZ1ncHnmyC0Q35vr5qVRt8Prm3E7DONNTx48uTkIwl14KTT5Fm/xa+aQ18rS4mS1Lwvb",
+ "xpGOprY5xTauxFtsCRlF3XQi4FFVQYtx8Ny8baM07rrO6mFBr3sT4lthqKqxl0vsmgvDqHxOCZVz+5Hh",
+ "dQYZ5I7/8xjHvzMh32JCj1ZjDDbTrusTucO4Pn746PET94qkFzaWq/3e9IsnxydffeVeqxufWD2n87rS",
+ "8ngBeS7cB+6O6I5rHhz/zz/+dzKZ3NnJVsX6681LWwH4U+GtXfUuJIC+3frMNymmrfteHrtQdyPu+6/F",
+ "OnoLiPXtLfTRbiGD/T/F7TNtkpFTRCtLZqNG0AFvI3tM9rmPxr7Jh+E71WUyIS+FK9dW5lQSITOQrhPi",
+ "vKSScg2QTTylYtKpsuWp0pwB10ZxxN5uMlEsA1vlZl5KqNLnCgkrjJHH6VGnb0Cwm9FjJO0ny+R/pOug",
+ "hNO0uqa1cEtGs+eSrn13SeyfJiT+9NVX5MG41l7y3AyQVIiJMdclXY9u0OpXEdug+PNmc6adAbo49hAL",
+ "Ui39VDm0YSeYvzbn/mwld0vubmMPxDn3dvzUjp3QjuCKom21IFjBzvaexGaImzob30h5XoSKszgzw1Dj",
+ "wCfsI9hpmo4qoW303h7iWyPAlVhJm6D2ZBuYdaqO3qNeHvKMzrnFrLm/lrs08B1JsfTOI0FmoNOFS9ht",
+ "oT7CnnxrqH7etK35+KGlGtzFbtWLsCY1NsUeWPYsyKVEBx7ICBH/5Ls0mMdsZgvM+IJMvsc+uqaYbztb",
+ "dZx1fbldqQktqrxes4t7QfmsnrwrkCFaDuH/vEXwfgjuMMdvfOtRxJhbxJ8h4t+rkgl5Keq0cdf16s/o",
+ "erzOm/26F/RScLA+diP5Wlq8dadWYodhHBYpvl6I1V+q/iOXFkGOfBPdrXLI97aF7VZZZMjtbSb7LK/w",
+ "7x2WttwyZm2TncUQ6tGGMGfzoq2C1eyI8RG1mI/CTz9B1eZjcKybYTF4SD2fcWIBPyzTwRI8lpiPqmYI",
+ "fRwo3l9mMDfSogpDi7aEmUIu+Fx9mqxoa6efKF4iVFJ13om31/nrnd1nWN3HqLw2AtLVe1KMp2CbRGN/",
+ "O6bIkinlgiWfPPjbzUGo2dJXFOdh7upH5i5PHzy+uenfgFyxFMgZLAshqWT5hvzMq4beV+F22Dyoqr/m",
+ "rcHRflHobWrWBUvDIkaXZ4KN0LX3es2yD7uZYVCxck8+yHjAB8PyhrQogMrLM8Ddrqt2ue3T52F0cKOn",
+ "TVVRKwKKQdGeAfL/MRpod8K0dzFzl1/JLaC++pdjEy50V8zGVXCMkQLE7Ji85feJWtCnDx/99ujpF/7P",
+ "R0+/6LGcmXlc0Z6u7aweyDy2wwwxoH3W5sDDSu0Vfo9verf328TxiGXraNeLuo9dp+i1E8vuKFLQTW9r",
+ "nGJHH75w2Lon380XO1SaTRdR/cqrP1VZ+VP+daUF24p8rn3dbf+9nuSJgM8YQqsb8VVY396Tb4s02SLL",
+ "qvnZTSundZKBveg88mTrzvmogq7+WEpqgjoqcC/YNNHy8WRK7MwyDtzdhRRapCK3sStlUQipq9OtJoPE",
+ "Pehz2zWkvT7C3UuYS6lOF2Vx9B7/gxW+PtSJB7YB+5Fe8yOsHn30fmuIAIIYaQ1r5dJoS4WumjygM+2u",
+ "EIDWiRm3D5GthI2xBBH57Hqks7+0ULNfp9+rmrQjI3YOcJVXF3TRrmg3KPy9o7nx5NYF84ktqDaKzBjP",
+ "CA22saW7CVkzgms2jFz3oj+GneXm/U5PP+Nz9lJocrosbMMcyK4WvUPaHM7fHluv2/0EA3f1d0N8und+",
+ "eOP7wMTKur7zgt/DIRekYoOfjkrMjTZ39fXYvm9v8k/7Jn/mSw43yPD2Xv587mXpwylvr+BP/wp+/Nmu",
+ "5hodMQOvZH8TXfoarjXxPS/kSJdQNBm0XOHb/DSoerdXqb4V0re3uL3FP1Mng93JwUlLQyw0u1KZ3JSH",
+ "CJ39pKAfZmfI84iloe+gjm2vH70AhkVnRMqwfvhppsb2EDvjhDvFt4LPJy34BHt9K/fcmh4+M9NDj5Tj",
+ "tP5mk9Y+QWNfAWi1FBn4qBMxm7kib33ST7P3jCFPpemyIPbLqJSD3tgztoQ35s2f7BQHvWJrsFtiUQs8",
+ "gywFqeCZGuAVdaNe9h5CN24/ADfuAa12wMPi0r8nlybZ10ENmQ4lkDbyFfYM8sXuHDIyWBFDgJMDkO3R",
+ "e/svmtMKoWJdlz0BdzbmrtsWW73PjtsAkLxCIdSWAfRfiRl5YIv4lRwzdermgJRnRMuNEVR9zRIJNCdp",
+ "I0K/gqN7ct70npydqkBndT1riusCoj6hhwxnbWVH/XDjB+AZ5Y7kuwjSglDCYU41W4GPW5/cZtRf+jZz",
+ "+exbGOCY0Cyzp7HeBFiB3BBVTpWRdXgz0PKOap6XPRgGrAuQzFzRNK8d8FZNOLLp8tsCKt/YN654abV4",
+ "kU3Sl80oIH+zuhR+MSM/slSKk3wulI/rUhulYdlpvec+/a2n6Ko3JHRjwATPGYdkKXisIdxP+PRHfBjt",
+ "py40zfs+PjMP+75t3bdN+FtgNecZcidfFb+fyOm/Uq5Ga7USCiG1b9IPTk3e8yj5Q7PhafckbXgaOLXc",
+ "w2CgsH1c4+ej940/XbEM96ZalDoTF8G3qNnboJ8hefJBo+pLWNJaDZ/V9drSrtOHFOAhdmKqp5HWX0E7",
+ "8t7uX3/R/BDncgmJBEM3U7ECqVrq2W2SyJ8qSWTwvu/FY22ry10crVSHlUheigzsuM1Os7H6zFxk4Dpy",
+ "dgWRKtgxHljvb6X6vVaoc0rL+UKTsiBaxIKq6w8Tmlomm1j1Jj5hUBHNKkE43YKugNAc+5ySKQAnYmoW",
+ "Xd+PuEiqsCadj8x2IZ1RUSiAq5AiBaUgS3w96l2gVX1OMY5bb8ETAo4AV7MQJciMyisDe77aCWfVJ1yR",
+ "uz/8YhTmG4fXioLbEWsrYUXQW1XbcNJeF+ph028juPbkIdlRCcSLBphIIpZFDi6VJILCvXDSu39tiDq7",
+ "eHW0YK4Fu2aK95NcjYAqUK+Z3q8KbVkk5v7ugvjMPj1jS5TEOOXC2xVjg+VU6WQXWzYvhWtRZgUBJ4xx",
+ "Yhy4R+F8QZV+7bIKM6xAY68TnMfK2GaKfoBXff3ozci/VN3oO2On5j7kqlRVy3qXKQBZbA0c1lvmegnr",
+ "ai5M6/RjV6kI1sK3a+Q+LAXjO2QFRbkJ1YE33wwXWRzaH6kzUHRR2QCiRsQ2QN74twLshm78HkCYqhFt",
+ "CQeLjIaUMxUiB8ptRpcoCsMtdFLy6rs+NL2xb5/on+t3u8RFdX1vZwJUmCbiIL+wmFVooF1QRRwcZEnP",
+ "XSbJ3DVZ6sJsDmOCGeDJNspHk615KzwCOw9pWcwlzSDJIKcRU8rP9jGxj7cNgDvuyTNZCQ3JFGZCQnzT",
+ "a0qWvSaiamiB46mY8EjwCUnNETTKc00g7usdI2eAY8eYk6OjO9VQOFd0i/x4uGy71T1mKTOG2XFHDwiy",
+ "4+hDAO7BQzX05VGBHye1+aA9xT9AuQkqOWL/STag+pZQj7/XAtrmvPACa9wULfbe4sBRttnLxnbwkb4j",
+ "GzMgfpbG/nbs0jVWf2kaUAMFcHIZ5fbogjKdzIS0gnRCZxrkzoD4v1Pm3eHONaCFq01AcAR3b7pxkMmH",
+ "rS4cF7EgEHddGBLp+t/MVN8KOajEZrOQDGWalFyzPCgzXqnKn57B8NYIcGsEuDUC3BoBbo0At0aAWyPA",
+ "rRHg1ghwawS4NQLcGgH+ukaAj1U0N/EShy8lxgVP2lGJ5DYq8U9VZLK6q7xRAs0YF5Rp1zXT5/u7J1er",
+ "sauB5ogDlkN/nLQN3zz75uQFUaKUKZDUQMg4KXJqdANY66qHW7M7qO9bbBtB2sajVMHjR+TN9ye+Ft7C",
+ "1Wxrvnv3xPX/VnqTwz3XJQF4ZkVR3y4BuEG665ZA/Z3ge725zncsxxhzRb7Bt5/DCnJRgLRltoiWZcTk",
+ "cwY0f+Zws8Pi83czuQta/d2M9vu4YWhyaFvSwsv5fq1UEWpzF8nzIJvx9xnNFfzel9Box1vSItZurbr5",
+ "rC0IucnXItu0TojZtSPcwObZqCviMU7lJlJvqZtM0CYNLQy/coTVNWZ9OHjdxi7RdslsF4XFxHUJKnqO",
+ "t1F5tGBhtWGdoWzK66xFJ6NYtma7St+oAnBICOwZJhzYPSGv7Xcftyo8QuSOWM3MP5nIweabFdPAd40W",
+ "4VjP5xqV7xEfPb149seGsLMyBcK0Ir704+7rZTxaJ2akOfDEMaBkKrJN0mBfo8YtlDFFlYLldPdNFPJP",
+ "12DYXT7myfZ76uNcI8+DxW3jySHRrBPHgHu480bDYN5cYQtHdOw5wPh1s+g+NhqCQBx/ilmVWrxvX6ZX",
+ "T7O5ZXy3jC84jS2JgHFXKrfNRCbXyPjkRpa8n+d9s4a0NMCFJ/kumufRJwdr3XBsZjAt53NslNxx0pml",
+ "AY7HBP9IrNAudygX3I+C7OBV88yrpnu3h+tylyAD+66vcXgPt4PyDXozlgXlG+/zhUSxZZlbHNoec4dl",
+ "tLaabTcSAP2xzvjXZ9Z+5W1+gfHWXbXN3y1ayAVVxO4vZKTkmcsd6tS8XvPhFUPs0GdrXrPprdVB7Hoj",
+ "q3PzDrki/C43k7YVKUAmes3tgWp2Ure1te3Jndw2iP1rXBs25Rt6GGy3TnTNEA50e8iAr+H1EXQDqZPh",
+ "Gj1C0GrRnzoStgaxbx40eqQzfDOIpDapOCcp5AWhvnt/KrjSskz1W07RSRMsbNINMPHW6H7+9sy/EvcT",
+ "Rtx4bqi3nGJz98p1E+VzM4j4Kb4F8GxUlfM5KMMrQyKZAbzl7i3GScmNpiVmZMlSKRKbiGrOkJFPJvbN",
+ "Jd2QGdb/EOQPkIJMzc0e7Lo1GCvN8txFtJhpiJi95VSTHKjS5EdmuKwZzhcfqEK5QF8IeV5hId4pYg4c",
+ "FFNJ3PjynX2KzRjc8r2RDw2W9nFdRP1muzB42FnWC/npcwM3xdrFOVO6DoLowH5jDvAl40mUyM4WQFxM",
+ "WJu2yF2smOYI6F7TO6QX8JabG04Lglyd6suRQ9vN0zmL9nS0qKaxES1vkF/rIBXvIFyGRJjMrWvlT5Sa",
+ "GdCBd1/ixttq9K2939ON0rhygWfmac+FbJ+65l09LzkloWEIa5WDcW+cNUD+8zZ+f3c9+qJH48E0xu6A",
+ "XXbVbM+EePMbPiY0F3xuqxAaDVLgPjFelBoDq6/TSAcrmidiBVKyDNTAlTLBv1nR/Kfqsx0XYNBcbrmE",
+ "jFEN+YYUElLIbJ0spkitJE9spQGSLiif410pRTlf2NfsOBcgoerDZfTS9hDxOiVrntiaaV0YT4g1MIZl",
+ "ZYGmi0hfE7xRjCLsd9CWgRii6kaOMFbE7NN8x6NeydYgdVUHpFnkNM/1gGu7cQEH+KknPkQJ0Vsqu6Wy",
+ "vaksVmIPUTdr6dwWX+G2XLNx5roLSt6greejVJu9Ldn+Zy/Z7jmQIpRI2pCy473CqCJMkwssqDMFYi6M",
+ "Em3MrqW400gnxDCkwJ5uKy8q1+kyXVDGXTWWKjwf4dCuG6/27f+uxTxnmRna5Qw6IC0l0xuUy2nBfjsH",
+ "8/93RrBVIFdeZC9lPjoeLbQujo+OcpHSfCGUPhp9GIfPVOvhuwr+917aLiRbGQ3iw7sP/z8AAP//R7K1",
+ "ReZ0AQA=",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/participating/private/routes.go b/daemon/algod/api/server/v2/generated/participating/private/routes.go
index b513cffac..1e585cd01 100644
--- a/daemon/algod/api/server/v2/generated/participating/private/routes.go
+++ b/daemon/algod/api/server/v2/generated/participating/private/routes.go
@@ -158,173 +158,183 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+y9e3PcNrIo/lVQc06VH7+hJD+SXasqdX6ynXh143hdlpK951i+CYbsmcGKBLgAKM3E",
- "V9/9FhoACZIgh3pE3lT5L1tDPBqNRqPf+DxLRVEKDlyr2eHnWUklLUCDxL9omoqK64Rl5q8MVCpZqZng",
- "s0P/jSgtGV/N5jNmfi2pXs/mM04LaNqY/vOZhH9VTEI2O9SygvlMpWsoqBlYb0vTuh5pk6xE4oY4skMc",
- "v55djXygWSZBqT6Uf+f5ljCe5lUGREvKFU3NJ0UumV4TvWaKuM6EcSI4ELEket1qTJYM8kzt+UX+qwK5",
- "DVbpJh9e0lUDYiJFDn04X4liwTh4qKAGqt4QogXJYImN1lQTM4OB1TfUgiigMl2TpZA7QLVAhPACr4rZ",
- "4ceZAp6BxN1KgV3gf5cS4HdINJUr0LNP89jilhpkolkRWdqxw74EVeVaEWyLa1yxC+DE9NojP1VKkwUQ",
- "ysmHH16RZ8+evTALKajWkDkiG1xVM3u4Jtt9djjLqAb/uU9rNF8JSXmW1O0//PAK5z9xC5zaiioF8cNy",
- "ZL6Q49dDC/AdIyTEuIYV7kOL+k2PyKFofl7AUkiYuCe28Z1uSjj/F92VlOp0XQrGdWRfCH4l9nOUhwXd",
- "x3hYDUCrfWkwJc2gHw+SF58+P5k/Obj6j49Hyf+4P795djVx+a/qcXdgINowraQEnm6TlQSKp2VNeR8f",
- "Hxw9qLWo8oys6QVuPi2Q1bu+xPS1rPOC5pWhE5ZKcZSvhCLUkVEGS1rlmviJScVzw6bMaI7aCVOklOKC",
- "ZZDNDfe9XLN0TVKq7BDYjlyyPDc0WCnIhmgtvrqRw3QVosTAdSN84IL+fZHRrGsHJmCD3CBJc6Eg0WLH",
- "9eRvHMozEl4ozV2lrndZkdM1EJzcfLCXLeKOG5rO8y3RuK8ZoYpQ4q+mOWFLshUVucTNydk59nerMVgr",
- "iEEabk7rHjWHdwh9PWREkLcQIgfKEXn+3PVRxpdsVUlQ5HINeu3uPAmqFFwBEYt/QqrNtv+vk7+/I0KS",
- "n0ApuoL3ND0nwFORQbZHjpeECx2QhqMlxKHpObQOB1fskv+nEoYmCrUqaXoev9FzVrDIqn6iG1ZUBeFV",
- "sQBpttRfIVoQCbqSfAggO+IOUizopj/pqax4ivvfTNuS5Qy1MVXmdIsIK+jmu4O5A0cRmuekBJ4xviJ6",
- "wwflODP3bvASKSqeTRBztNnT4GJVJaRsySAj9SgjkLhpdsHD+PXgaYSvABw/yCA49Sw7wOGwidCMOd3m",
- "CynpCgKS2SM/O+aGX7U4B14TOlls8VMp4YKJStWdBmDEqcclcC40JKWEJYvQ2IlDh2Ewto3jwIWTgVLB",
- "NWUcMsOcEWihwTKrQZiCCcf1nf4tvqAKvn0+dMc3Xyfu/lJ0d310xyftNjZK7JGMXJ3mqzuwccmq1X+C",
- "fhjOrdgqsT/3NpKtTs1ts2Q53kT/NPvn0VApZAItRPi7SbEVp7qScHjGH5u/SEJONOUZlZn5pbA//VTl",
- "mp2wlfkptz+9FSuWnrDVADJrWKMKF3Yr7D9mvDg71puoXvFWiPOqDBeUthTXxZYcvx7aZDvmdQnzqNZ2",
- "Q8XjdOOVkev20Jt6IweAHMRdSU3Dc9hKMNDSdIn/bJZIT3Qpfzf/lGVueutyGUOtoWN3JaP5wJkVjsoy",
- "Zyk1SPzgPpuvhgmAVSRo02IfL9TDzwGIpRQlSM3soLQsk1ykNE+UphpH+k8Jy9nh7D/2G/vLvu2u9oPJ",
- "35peJ9jJiKxWDEpoWV5jjPdG9FEjzMIwaPyEbMKyPRSaGLebaEiJGRacwwXleq9RWVr8oD7AH91MDb6t",
- "tGPx3VHBBhFObMMFKCsB24YPFAlQTxCtBNGKAukqF4v6h4dHZdlgEL8flaXFB0qPwFAwgw1TWj3C5dPm",
- "JIXzHL/eI2/CsVEUFzzfmsvBihrmbli6W8vdYrVtya2hGfGBIridQu6ZrfFoMGL+XVAcqhVrkRupZyet",
- "mMZ/c21DMjO/T+r85yCxELfDxIWKlsOc1XHwl0C5edihnD7hOHPPHjnq9r0Z2ZhR4gRzI1oZ3U877gge",
- "axReSlpaAN0Xe5cyjkqabWRhvSU3ncjoojAHZzigNYTqxmdt53mIQoKk0IHhZS7S879Rtb6DM7/wY/WP",
- "H05D1kAzkGRN1XpvFpMywuPVjDbliJmGqOCTRTDVXr3Eu1rejqVlVNNgaQ7euFhiUY/9kOmBjOguf8f/",
- "0JyYz+ZsG9Zvh90jp8jAlD3OzsmQGW3fKgh2JtMArRCCFFbBJ0brvhaUr5rJ4/s0aY++tzYFt0NuEbhD",
- "YnPnx+Cl2MRgeCk2vSMgNqDugj7MOChGaijUBPheO8gE7r9DH5WSbvtIxrGnINks0IiuCk8DD298M0tj",
- "nD1aCHkz7tNhK5w0JmdCzagB8513kIRNqzJxpBgxW9kGnYEaL9840+gOH8NYCwsnmv4BWFBm1LvAQnug",
- "u8aCKEqWwx2Q/jrK9BdUwbOn5ORvR988efrr02++NSRZSrGStCCLrQZFHjrdjCi9zeFRf2WoHVW5jo/+",
- "7XNvqGyPGxtHiUqmUNCyP5Q1gFoRyDYjpl0fa20046prAKcczlMwnNyinVjbvgHtNVNGwioWd7IZQwjL",
- "mlky4iDJYCcxXXd5zTTbcIlyK6u7UGVBSiEj9jU8YlqkIk8uQComIt6U964FcS28eFt2f7fQkkuqiJkb",
- "Tb8VR4EiQll6w6fzfTv06YY3uBnl/Ha9kdW5eafsSxv53pKoSAky0RtOMlhUq5YmtJSiIJRk2BHv6Deg",
- "T7Y8RavaXRDpsJpWMI4mfrXlaaCzmY3KIVu1NuH2ulkXK94+Z6d6oCLgGHS8xc+o1r+GXNM7l1+6E8Rg",
- "f+U30gJLMtMQteC3bLXWgYD5XgqxvHsYY7PEAMUPVjzPTZ++kP5OZGAWW6k7uIybwRpaN3saUjhdiEoT",
- "SrjIAC0qlYpf0wOee3QZoqdThze/XluJewGGkFJamdVWJUE/Xo9zNB0TmlrqTRA1asCLUbufbCs7nfUK",
- "5xJoZrR64EQsnKvAOTFwkRSdkNpfdE5IiJylFlylFCkoBVniTBQ7QfPtLBPRI3hCwBHgehaiBFlSeWtg",
- "zy92wnkO2wRd5oo8/PEX9egLwKuFpvkOxGKbGHprhc/5g/pQT5t+jOC6k4dkRyUQz3ONdmkYRA4ahlB4",
- "LZwM7l8Xot4u3h4tFyDRM/OHUryf5HYEVIP6B9P7baGtyoFAMKfonLIC7XaccqEgFTxT0cFyqnSyiy2b",
- "Ri1tzKwg4IQxTowDDwglb6nS1pvIeIZGEHud4DxWQDFTDAM8KJCakX/xsmh/7NTcg1xVqhZMVVWWQmrI",
- "YmvgsBmZ6x1s6rnEMhi7ln61IJWCXSMPYSkY3yHLrsQiiOra6O7c7f3FoWna3PPbKCpbQDSIGAPkxLcK",
- "sBsGwwwAwlSDaEs4THUop47Amc+UFmVpuIVOKl73G0LTiW19pH9u2vaJi+rm3s4EKIzBce0d5JcWszYM",
- "ak2NCo0jk4KeG9kDFWLr9uzDbA5johhPIRmjfHMsT0yr8AjsPKRVuZI0gySDnG77g/5sPxP7eWwA3PFG",
- "8REaEhvPEt/0hpJ9+MDI0ALHUzHhkeAXkpojaDSPhkBc7x0jZ4Bjx5iTo6MH9VA4V3SL/Hi4bLvVkRHx",
- "NrwQ2uy4JQeE2DH0KfAOoKEe+eaYwM5Jo5Z1p/hvUG6CWoy4/iRbUENLaMa/1gIGjGkuUjg4Lh3u3mHA",
- "Ua45yMV2sJGhEztg2XtPpWYpK1HV+RG2d675dSeI+ptIBpqyHDISfLBaYBn2JzYQozvmzTTBSUaYPvg9",
- "K0xkOTlTKPG0gT+HLarc722E32kQF3gHqmxkVHM9UU4QUB83ZCTwsAlsaKrzrZHT9Bq25BIkEFUtCqa1",
- "jdxta7palEk4QNTAPTKj8+bY6Di/A1PcSyc4VLC8/lbMZ1YlGIfvtKMXtNDhVIFSiHyC8aiHjCgEkxz/",
- "pBRm15kLIvZhpJ6SWkA6po2uvPr2f6BaaMYVkP8WFUkpR42r0lCLNEKinIDyo5nBSGD1nM7F32AIcijA",
- "KpL45fHj7sIfP3Z7zhRZwqWPvDcNu+h4/BjNOO+F0q3DdQemQnPcjiPXB1r+8d5zwQsdnrLbxexGnrKT",
- "7zuD1+4Cc6aUcoRrln9rBtA5mZspaw9pZJp7HcedZNQPho6tG/f9hBVVTvVduC9G5dFan2BFARmjGvIt",
- "KSWkYKOrjYClLCwGNGLjrtI15SuUq6WoVi7wx46DjLFS1oIhK94bIip86A1PVlJUZYxRumBPH2BvxA6g",
- "RvMJEImdrZx/Sev5XE7FlBvMIzzYnTdmzCGvwnw2qBgapF40iqFFTjtLII4FTHtIVJWmANEQ4JjKVS+1",
- "kw3Z5Le4AY3YUEkbA0Voqiuah1RHjpeE8m07TZKyXBkuyBTBdqZzE1c7t2vzOSxLmlvfbCSpIjwpLYkv",
- "2PkGpV1UTPQ7IJEYaahPGSEBmuNlyPiPseE3Q8eg7E8cBF01H4firoz+nW/vQAyyAxEJpQSFl1Zot1L2",
- "q1iGuU/uVlNbpaHom/Zt118HGM2HQQVS8JxxSArBYRtN92UcfsKPUcaBF+dAZxRhhvp2tZIW/B2w2vNM",
- "ocbb4hd3O+BF7+uAwzvY/O64Ha9OmPWFVkvIS0JJmjO0aQqutKxSfcYpWk2CwxYJzPD64bAd7ZVvEjfc",
- "RexqbqgzTjEop7alRJ3JS4gYDn4A8OY0Va1WoDr8kywBzrhrxTipONM4V2H2K7EbVoLE6Ig927KgW8MC",
- "0ez3O0hBFpVu82TMPFHasEvrYjLTELE841STHIxO/RPjpxsczrtoPc1w0JdCntdYiF8hK+CgmEriASRv",
- "7FeM7XPLX7s4P8wUtp+tU8KM36SnbNGo0mS//p+H/3X48Sj5H5r8fpC8+P/2P31+fvXoce/Hp1ffffd/",
- "2z89u/ru0X/9Z2ynPOyxvAgH+fFrp6wdv0aJvPFK9GC/N4t0wXgSJbLQ996hLfIQcwAdAT1q22v0Gs64",
- "3nBDSBc0Z5kRuW5CDl0W1zuL9nR0qKa1ER37jF/rNeXcW3AZEmEyHdZ442u8H3MVz0BCN5lLKsLzsqy4",
- "3Uov6NoAex/7IpbzOsvMFqA4JJiCtKY+cMv9+fSbb2fzJnWo/j6bz9zXTxFKZtkmKh3CJqa+uAOCB+OB",
- "IiXdKhgQQBH2aJiPjTYIhy3A6L1qzcr75xRKs0Wcw/mwZWcG2fBjbuOJzflBp9vW2fLF8v7h1tLI4aVe",
- "xxLTW5ICtmp2E6ATCFFKcQF8Ttge7HXNEJlRzVzAUQ50iQnSqOiJKWkY9TmwhOapIsB6uJBJun6MflC4",
- "ddz6aj5zl7+6c3ncDRyDqztn7WHzf2tBHrz5/pTsO4apHthcRTt0kF0W0VpdAkUrRMZwM1uOwyZrnvEz",
- "/hqWjDPz/fCMZ1TT/QVVLFX7lQL5kuaUp7C3EuTQ52S8ppqe8Z6kNVgxJ8iGIWW1yFlKzkOJuCFPWwWh",
- "P8LZ2Uear8TZ2adetEBffnVTRfmLnSC5ZHotKp24HO5EwiWVMW+MqnN4cWRbpGFs1jlxY1tW7HLE3fhx",
- "nkfLUnVz+frLL8vcLD8gQ+Uy1cyWEaWF9LKIEVAsNLi/74S7GCS99CaMSoEivxW0/Mi4/kSSs+rg4BmQ",
- "VnLbb+7KNzS5LWGyIWMw17Brv8CFW70GNlrSpKSrmNfn7OyjBlri7qO8XKCSnecEu7WS6nzQMA7VLMDj",
- "Y3gDLBzXThDCxZ3YXr5eT3wJ+Am3ENsYcaNxRd90v4I0uxtvVydVr7dLlV4n5mxHV6UMifudqct4rIyQ",
- "5eMDFFthDKareLIAkq4hPXelKKAo9Xbe6u5DUJyg6VkHU7ZIiU2SwTR5tJkvgFRlRp0o3rUgLbZEgdY+",
- "CPQDnMP2VDRZ9tdJUG7ny6qhg4qUGkiXhljDY+vG6G6+i3NCE1dZ+rRTzD/yZHFY04XvM3yQrch7B4c4",
- "RhStfM4hRFAZQYQl/gEU3GChZrxbkX5seUbLWNibL1KwxPN+4po0ypMLSQpXgwZu+70ArHgkLhVZUCO3",
- "C1esx+aEBlysUnQFAxJy6LaYmHnZcnXgILvuvehNJ5bdC61330RBto0Ts+YopYD5YkgFlZlOIJqfyXrG",
- "nBMAa/A5hC1yFJPqiD3LdKhsuY9sUbEh0OIEDJI3AocHo42RULJZU+XrCGG5JX+WJ8kAf2CO81hli9Cg",
- "H9RUqu3rnud2z2lPu3T1LXxRC1/JIlQtJ1SlMBI+hm3HtkNwFIAyyGFlF24be0Jp8q2bDTJw/H25zBkH",
- "ksTCsahSImW2EFRzzbg5wMjHjwmxJmAyeYQYGQdgo8cXBybvRHg2+eo6QHKXL0792OgrDv6GeGqLDVA2",
- "Io8oDQtnAw6k1HMA6mL46vurE0mKwxDG58SwuQuaGzbnNL5mkF6BBRRbO+UUXMzBoyFxdsQCby+Wa63J",
- "XkU3WU0oM3mg4wLdCMQLsUlsbltU4l1sFobeozHbmGkXO5i2lMUDRRZig3EseLXYGOEdsAzD4cEINPwN",
- "U0iv2G/oNrfAjE07Lk3FqFAhyThzXk0uQ+LElKkHJJghcnkYVKe4EQAdY0dT6tUpvzuV1LZ40r/Mm1tt",
- "3lRd8ukwseM/dISiuzSAv74Vpq4n8b4rsUTtFO1wjHYpjUCEjBG9YRN9J03fFaQgB1QKkpYQlZzHXHdG",
- "twG8cU58t8B4gQU7KN8+CmJ8JKyY0tAY0X1IwpcwT1KsEybEcnh1upRLs74PQtTXlC1Egx1by7z3FWCM",
- "7JJJpRP0QESXYBr9oFCp/sE0jctK7SgiW1WTZXHegNOewzbJWF7F6dXN++NrM+27miWqaoH8lnEbG7LA",
- "KrDR2MKRqW346eiC39oFv6V3tt5pp8E0NRNLQy7tOf4k56LDecfYQYQAY8TR37VBlI4wyCAltM8dA7nJ",
- "Hk5MCd0bs772DlPmx94ZNuITU4fuKDtSdC2BwWB0FQzdREYsYToootrP1Rw4A7QsWbbp2ELtqIMaM72W",
- "wcOXnupgAXfXDbYDA4HdM5YuIkG1q4w1Ar4th9sq8rE3CTOn7VpgIUMIp2LKF3PvI6pOJ9uFq1Og+Y+w",
- "/cW0xeXMruaz25lOY7h2I+7A9ft6e6N4Rte8NaW1PCHXRDktSykuaJ44A/MQaUpx4UgTm3t79D2zurgZ",
- "8/T7o7fvHfhX81maA5VJLSoMrgrblX+aVdmCZgMHxBeLNjqfl9mtKBlsfl2FKTRKX67BVd0NpNFeecDG",
- "4RAcRWekXsYjhHaanJ1vxC5xxEcCZe0iacx31kPS9orQC8pybzfz0A5E8+DiptWYjHKFcIBbe1cCJ1ly",
- "p+ymd7rjp6Ohrh08KZxrpC5wYUtfKyJ414WO4cXb0nndC4rF/axVpM+ceFWgJSFROUvjNla+UIY4uPWd",
- "mcYEGw8Io2bEig24YnnFgrFMMzVB0e0AGcwRRaYvFDmEu4Vwz5pUnP2rAsIy4Np8kngqOwcVqyk6a3v/",
- "OjWyQ38uN7C10DfD30bGCAtbdm88BGJcwAg9dT1wX9cqs19obZHCcOvGJXENh384Y+9KHHHWO/pw1GyD",
- "F9dtj1v4Ckmf/xnCsOWodz+B4pVXV2FzYI7okyZMJUspfoe4nofqcSQVx5fyZBjl8jvwCTHnjXWneZml",
- "mX1wu4ekm9AK1Q5SGKB63PnALYc1Bb2FmnK71faFgVasW5xgwqjSfTt+QzAO5l4kbk4vFzRWcNEIGQam",
- "o8YB3LKla0F8Z497VSc22NlJ4Euu2zKbZV2CbLLk+hVbbigw2GkniwqNZIBUG8oEc+v/y5WIDFPxS8rt",
- "QxWmnz1KrrcCa/wyvS6FxBoJKm72zyBlBc3jkkOW9k28GVsx+wZDpSAo8u8Gsu/bWCpyDyXU6ToONcdL",
- "cjAPXhpxu5GxC6bYIgds8cS2WFCFnLw2RNVdzPKA67XC5k8nNF9XPJOQ6bWyiFWC1EIdqje182oB+hKA",
- "kwNs9+QFeYhuO8Uu4JHBorufZ4dPXqDR1f5xELsA3BsaY9wkQ3byD8dO4nSMfks7hmHcbtS9aDq5fURr",
- "mHGNnCbbdcpZwpaO1+0+SwXldAXxSJFiB0y2L+4mGtI6eOGZfQFGaSm2hOn4/KCp4U8D0eeG/VkwSCqK",
- "gunCOXeUKAw9NRX87aR+OPucjCu+6uHyH9FHWnoXUUeJvF+jqb3fYqtGT/Y7WkAbrXNCbWGMnDXRC74k",
- "NDn2dXewGm1dhNbixsxllo5iDgYzLEkpGdeoWFR6mfyVpGsqaWrY394QuMni2+eRCrztSpD8eoDfO94l",
- "KJAXcdTLAbL3MoTrSx5ywZPCcJTsUZPtEZzKQWdu3G035DscH3qqUGZGSQbJrWqRGw049a0Ij48MeEtS",
- "rNdzLXq89srunTIrGScPWpkd+vnDWydlFELGiuk1x91JHBK0ZHCBsXvxTTJj3nIvZD5pF24D/Zf1PHiR",
- "MxDL/FmOKQIvRUQ79VWha0u6i1WPWAeGjqn5YMhg4Yaak3YF3vt3+nnjc9/5ZL54WPGPLrBfeEsRyX4F",
- "A5sYVAePbmdWfw/835S8FJupm9o5IX5j/w1QE0VJxfLslyYrs1N8XVKerqP+rIXp+GvzTFS9OHs/RWvW",
- "rSnnkEeHs7Lgr15mjEi1/xRT5ykYn9i2Ww/eLrezuAbwNpgeKD+hQS/TuZkgxGo74a0OqM5XIiM4T1Mg",
- "reGe/XcEgmrP/6pA6VjyEH6wQV1otzT6ri02TIBnqC3ukTf2Jdg1kFb5G9TS6ioCrvStNahXZS5oNsdC",
- "DqffH70ldlbbxz52Yosdr1BJaa+iY68Kaj9OCw/275bEUxemjzMeS21WrTRWo1KaFmUsOdS0OPUNMAM1",
- "tOGj+hJiZ4+8Dt50tHmkZghDD0smC6Nx1aNZ2QVpwvxHa5quUSVrsdRhkp9epdtTpQpexqtfuKkLIuK5",
- "M3C7Qt22TvecCKM3XzJlHwCFC2jno9bJ2c4k4PNT28uTFeeWUqKyx1jxgJug3QNnAzW8mT8KWQfx1xTI",
- "bZH76xYtP8Fe0QJN3QrovSfxbHZj/XKJf9g5pVxwlmJ5pNjV7F4KneIDm1BJqmtk9UfcndDI4YrWXa/D",
- "5BwWByuxe0boENc3wgdfzaZa6rB/anySck01WYFWjrNBNvfPBzg7IOMKXIFLfFc24JNCtvyKyCGjruqk",
- "dmlck4wwLWZAsfvBfHvn1H6MFz9nHAV8hzYXmm4tdfiQoTZaAdNkJUC59bRzg9VH02cP02Qz2Hza8w8f",
- "2mow6JYzy7Y+6P5QR94j7TzApu0r09bVCap/bkUg20mPytJNOvy4RFQe0Bs+iOCIZzHxrp0AufX44Wgj",
- "5DYaSoL3qSE0uEBHNJR4D/cIo35oofOIjxFaLUVhC2JDuKIVDBiPgPGWcWie5YxcEGn0SsCNwfM60E+l",
- "kmorAk7iaadAc/Q+xxia0s71cNuhurWEDEpwjX6O4W1s3ogYYBx1g0Zwo3xbvwZqqDsQJl7hM8QOkf0X",
- "H1CqckJUhhkFnTcgYozDMG7/ykz7Augfg75MZLtrSe3Juc5NNJQkuqiyFeiEZlmsItVL/Erwqy8uBRtI",
- "q7owZVmSFGuitIvE9KnNTZQKrqpiZC7f4JbTBY+qRKghfNjF7zAmoSy2+G+sKuPwzrggjGuHAfqIC/cK",
- "xTXl5vZIPanX0HSi2CqZjgm8U26PjmbqmxF60/9OKT0XqzYg91waYozLhXsU42/fm4sjrJzQKzVqr5a6",
- "sAEG3Qn/FB6qjXVKbpsr4VXWqz2Kzp76qa1xA8Two1lzvPwGQm+DghjU3q/WezgUgJsOxotT7TLXNCWj",
- "LGgwG8hG79i8H4QibjkditixATvmc6/3NMmwJ2fj2KMI9aFgfYB+9HGmpKTMucYbZtHHrItIHzYXjh26",
- "ZoO7i3Bx3oMWux8vhmKyiWJ8lQPB791nhs7BpbPX78zbtfqoJK8S2l/dM692vDoqPrr+fnQCTvVlzaCD",
- "RttTV9LeLtPp5D/+YmPYCHAtt/8GJtzepvceaepLu9Y81TQhdTnkSeWRW7di/L2l4fpHTc0jpKdSKNaU",
- "4I49xDQx1u0U31IK6jf1x/KBJheQaqy73jjQJcB1qjmZyYJH/r7WQRrQHeuQQFf+aKzmUb/Y+o4LrZeW",
- "FKTW2ULVe9Mr/BzVYVLIlLAC7gq4e2evnXAwOex5uYRUs4sdaWD/WAMPUozm3ghh38sNssJYHUaLVUSu",
- "b2JrABrL0hqFJ6jmd2twhpJAzmH7QJEWNUQrZ8/9vXKTAhKIAeQOiSERoWJhCNZq6jzDTNWUgVjwYT+2",
- "OzSluAbf3AmSGm84lydJc+M2iY4jU8Yf/Zg0l+l6rfRfjAgdyhTrPxowLGy/xjcaVP0eni9AEaqk5Lhf",
- "pu/SFbDApL3aUeBLWYDyv/kMXTtLzs4hfBUI3TKXVGa+RdTO4E0Yych91Evv8gXvu0Av65lZE6TZT+iJ",
- "FH7CUNw0F0b+SobimdtxkeHj+Rj9YUt+Y8SngWsJ0r2ehsJeLhQkWvigzjE4xlDhHnq/CRLUYLFFC9xg",
- "CZQPTY0XLDpLseQJdZEt4QKJhIIa6GRQiWV4zjFkv7LffQaLLzq605xS0+vuQvM+PJepHhJDql8Sd1vu",
- "zoy5iWWFcW7falWxsizcoDI0/ZdSZFVqL+jwYNTWp8lFj0ZYSdQokfZX2dMvcywB9jbIMzyH7b4V/X2p",
- "fr+VIfRWhLJrCPL6O7t9p0anuH6dr+wCVncC55c03MxnpRB5MmDrP+5Xl+megXOWnkNGzN3hA9sGni0h",
- "D9HEXDtzL9dbX02lLIFD9miPkCNuQ4m9X7dd3rgzOX+gx+bf4KxZZQs+OZvS3hmPx2RiKSZ5S/7mhxnn",
- "agoM87vlVHaQHbVLNgOVbSS9jDziszdVKe17WrsPqzREZaGISSk7nrCIeJH9mwj+hQ2fsaJFwdL+Kwo9",
- "UWKJr1ElNDL4cc3A5623Alnn4Q5fY8g+05BSK8AZ5YGyvJLgMgfsszmdcvol1Wu/faZ5X8wyVzYoDOu3",
- "JdmpskqBV07cmz3dcyHKJIcLaDkSXDpDlaagFLuA8L0f25lkACWq6t0LJGYhD+mqw0Pc2pPAxjoFu1Gm",
- "YhFrd4rs4BgDj7EnljzUVBIyEF2wrKIt/KlbPMUy8W33ENaJJ+TahyO+uN7RcM+lJHUxt5gh06WT+C00",
- "9Ns87dIRkIInWOoxB96irLFwG2FkELVxzN6siMYkeujbtCNHJnh2ZdzyEtbYaYJ3pXWNoKbmT113S39q",
- "TuO0B2B8hx3ghQa54AkYLwk5cL5whO1PNVKCpQxSQmv5u2x8boEN+wq2yPJus0xb8cxGZ7X3JTDgqle1",
- "XXToXaau+RQL6giORcb6ZleFrjKsVR4SjuHd8oLm9286xUpLR4gP985tfKGh7S1EskWlulmY21s6ae7A",
- "znZ3U/P3aOr9B5g9ivo43VDO51HLCt4zhCyT5iQXzQt3OCS5xDGtU/TJt2ThUnRKCSlTrJO9eOnLKNem",
- "JnxVoHn+eNy2tWudvwh9CzJeevGFvGtKsmqBN0YDYXNEvzBTGTi5USqPUV+PLCL4i/GosFbGjuvivOUt",
- "tSWuO2GAQsIde02D+Kdrek37VUCmLs96Bs2lUynor3Pybd3CbeSibtY21eXfR+5Y3c4pnvp4OV7THUMF",
- "LEKwljVBUMlvT34jEpb4WI0gjx/jBI8fz13T3562P5vj/Phx/Jnl+woSsDhyY7h5YxTzy1DYuA2NHshQ",
- "6OxHxfJsF2G08k2a554wo+JXl3H2RR6c+tX6cvpH1T36cZ3wpO4mIGIia21NHkwVZJJMSCJx3SIpI2gV",
- "SSvJ9BYL4XjTP/s1Gs7wpvYWOm9zXTrB3X1anENdSqnxLVbK365vBM3xPjIyNQaHaXxa9/sNLcoc3EH5",
- "7sHiL/Dsr8+zg2dP/rL468E3Byk8/+bFwQF98Zw+efHsCTz96zfPD+DJ8tsXi6fZ0+dPF8+fPv/2mxfp",
- "s+dPFs+/ffGXB4YPGZAtoDOfdj373/gqW3L0/jg5NcA2OKElq1/UNmTsn5ahKZ5EKCjLZ4f+p//fn7C9",
- "VBTN8P7XmcvqnK21LtXh/v7l5eVe2GV/hc6ERIsqXe/7efovGb8/rjNzrGqJO2qTLrzJwJPCEX778P3J",
- "KTl6f7wXvJR5ODvYO9h7gg8plsBpyWaHs2f4E56eNe77viO22eHnq/lsfw00R9+7+aMALVnqP6lLulqB",
- "3HNv7JifLp7ue1Fi/7NzpFyNfdsPy1Xvf275m7IdPbGc7f5nX6VlvHWrDIrzswUdJkIx1mx/gcmfU5uC",
- "ChoPLwUVDLX/GUXkwd/3XUZc/COqKvYM7HunbLxlC0uf9cbA2unhnuTf/4z/QZoMwLLxp31wYVOCZEbI",
- "Qvez+9Xmj+3bJy77P295Gv2xP3zvqYUVRFPfMAmNjj0mj2fDHqvjDLmd7r2Nj3Wbre0Dj8zTg4M/xzP5",
- "z68J6KilpRVHGgHmJc2Iz0LEuZ/c39zHHGM+DAcklsMjBM/vD4J2kewfYUveCU1+QFXkaj775j534pgb",
- "wYjmBFsGNXD6R+Rnfs7FJfctjWhQFQWV28nHR9OVQjeEZBfUCWbBuwmzT+j3spmp7aN2lGU9orciEij9",
- "UmTbEYwValW6rJEGaY2EyLhZQl/F7D9E2XvL/hy2xEYFePcBFxnMQtlNywqubskT/rTP7n/lKV95irTT",
- "P7u/6U9AXrAUyCkUpZBUsnxLfuZ1zu+NedxRlkUjLttHfyePM9p3KjJYAU8cA0sWItv6uoatCc7BKoM9",
- "QWb/c7s4uRUMZxnkoKPRZOb3+tnI/iIWW3L8uifh2G5dzvtyi02Dot+HHz9bbcqoCo2y0wWxxxnDetNd",
- "3vQpzjXHyN4sZCU0sVjI3KK+MqKvjOhWws3kwzNFvolqH7aiBu3d2XNfHCNWFonqPihTdJQvenzvZOP7",
- "+k9M37GRq5CR4INNseii+SuL+Moibsci3kDkMOKpdUwjQnTX04emMgwMwcm6TwChQ8E3r3IqiYKpZo4j",
- "HNEZN+6Da9y3UhfFldXpKG9eSYts4N3qeV9Z3leW9+dheUe7GU1bMLm1ZnQO24KWtT6k1pXOxGXgX0BY",
- "bLxP3w5cP0ra+nv/kjKdLIV0eVBYIrvfWQPN912Fn86vTVJ97wtWCgh+DIMYo7/u1y8QRD92XROxr840",
- "P9DI12fznxvXZOjqQ9ZeO/k+fjJsGevbOq7feK4O9/cxt2AtlN6fXc0/d7xa4cdPNQl8ru8KRwpXn67+",
- "XwAAAP//qWbn/XHIAAA=",
+ "H4sIAAAAAAAC/+x9a3PcNrLoX0HNOVV+3KFGfiS7VlXqXNlOsrpxHJelZO9Z2zfBkD0zWJEAFwBHM/HV",
+ "fz+FBkCCJDjDkRR7U+VPtoZ4NBqNRr/Q/XGSiqIUHLhWk5OPk5JKWoAGiX/RNBUV1wnLzF8ZqFSyUjPB",
+ "Jyf+G1FaMr6cTCfM/FpSvZpMJ5wW0LQx/acTCf+qmIRscqJlBdOJSldQUDOw3pamdT3SJlmKxA1xaoc4",
+ "ezm53vGBZpkEpfpQ/sTzLWE8zasMiJaUK5qaT4pcMb0iesUUcZ0J40RwIGJB9KrVmCwY5Jk68ov8VwVy",
+ "G6zSTT68pOsGxESKHPpwvhDFnHHwUEENVL0hRAuSwQIbragmZgYDq2+oBVFAZboiCyH3gGqBCOEFXhWT",
+ "k3cTBTwDibuVAlvjfxcS4HdINJVL0JMP09jiFhpkolkRWdqZw74EVeVaEWyLa1yyNXBieh2RHyulyRwI",
+ "5eTtdy/IkydPnpmFFFRryByRDa6qmT1ck+0+OZlkVIP/3Kc1mi+FpDxL6vZvv3uB85+7BY5tRZWC+GE5",
+ "NV/I2cuhBfiOERJiXMMS96FF/aZH5FA0P89hISSM3BPb+E43JZz/s+5KSnW6KgXjOrIvBL8S+znKw4Lu",
+ "u3hYDUCrfWkwJc2g746TZx8+Ppo+Or7+j3enyT/cn189uR65/Bf1uHswEG2YVlICT7fJUgLF07KivI+P",
+ "t44e1EpUeUZWdI2bTwtk9a4vMX0t61zTvDJ0wlIpTvOlUIQ6MspgQatcEz8xqXhu2JQZzVE7YYqUUqxZ",
+ "BtnUcN+rFUtXJKXKDoHtyBXLc0ODlYJsiNbiq9txmK5DlBi4boQPXNC/LzKade3BBGyQGyRpLhQkWuy5",
+ "nvyNQ3lGwguluavUYZcVuVgBwcnNB3vZIu64oek83xKN+5oRqggl/mqaErYgW1GRK9ycnF1if7cag7WC",
+ "GKTh5rTuUXN4h9DXQ0YEeXMhcqAckefPXR9lfMGWlQRFrlagV+7Ok6BKwRUQMf8npNps+/85/+k1EZL8",
+ "CErRJbyh6SUBnooMsiNytiBc6IA0HC0hDk3PoXU4uGKX/D+VMDRRqGVJ08v4jZ6zgkVW9SPdsKIqCK+K",
+ "OUizpf4K0YJI0JXkQwDZEfeQYkE3/UkvZMVT3P9m2pYsZ6iNqTKnW0RYQTffHE8dOIrQPCcl8IzxJdEb",
+ "PijHmbn3g5dIUfFshJijzZ4GF6sqIWULBhmpR9kBiZtmHzyMHwZPI3wF4PhBBsGpZ9kDDodNhGbM6TZf",
+ "SEmXEJDMEfnZMTf8qsUl8JrQyXyLn0oJayYqVXcagBGn3i2Bc6EhKSUsWITGzh06DIOxbRwHLpwMlAqu",
+ "KeOQGeaMQAsNllkNwhRMuFvf6d/ic6rg66dDd3zzdeTuL0R313fu+KjdxkaJPZKRq9N8dQc2Llm1+o/Q",
+ "D8O5FVsm9ufeRrLlhbltFizHm+ifZv88GiqFTKCFCH83KbbkVFcSTt7zh+YvkpBzTXlGZWZ+KexPP1a5",
+ "ZudsaX7K7U+vxJKl52w5gMwa1qjChd0K+48ZL86O9SaqV7wS4rIqwwWlLcV1viVnL4c22Y55KGGe1tpu",
+ "qHhcbLwycmgPvak3cgDIQdyV1DS8hK0EAy1NF/jPZoH0RBfyd/NPWeamty4XMdQaOnZXMpoPnFnhtCxz",
+ "llKDxLfus/lqmABYRYI2LWZ4oZ58DEAspShBamYHpWWZ5CKleaI01TjSf0pYTE4m/zFr7C8z213Ngslf",
+ "mV7n2MmIrFYMSmhZHjDGGyP6qB3MwjBo/IRswrI9FJoYt5toSIkZFpzDmnJ91KgsLX5QH+B3bqYG31ba",
+ "sfjuqGCDCCe24RyUlYBtw3uKBKgniFaCaEWBdJmLef3D/dOybDCI30/L0uIDpUdgKJjBhimtHuDyaXOS",
+ "wnnOXh6R78OxURQXPN+ay8GKGuZuWLhby91itW3JraEZ8Z4iuJ1CHpmt8WgwYv5dUByqFSuRG6lnL62Y",
+ "xn9zbUMyM7+P6vznILEQt8PEhYqWw5zVcfCXQLm536GcPuE4c88ROe32vRnZmFHiBHMjWtm5n3bcHXis",
+ "UXglaWkBdF/sXco4Kmm2kYX1ltx0JKOLwhyc4YDWEKobn7W95yEKCZJCB4bnuUgv/0bV6g7O/NyP1T9+",
+ "OA1ZAc1AkhVVq6NJTMoIj1cz2pgjZhqigk/mwVRH9RLvanl7lpZRTYOlOXjjYolFPfZDpgcyorv8hP+h",
+ "OTGfzdk2rN8Oe0QukIEpe5ydkyEz2r5VEOxMpgFaIQQprIJPjNZ9EJQvmsnj+zRqj761NgW3Q24RuENi",
+ "c+fH4LnYxGB4Lja9IyA2oO6CPsw4KEZqKNQI+F46yATuv0MflZJu+0jGsccg2SzQiK4KTwMPb3wzS2Oc",
+ "PZ0LeTPu02ErnDQmZ0LNqAHznXaQhE2rMnGkGDFb2QadgRov326m0R0+hrEWFs41/QOwoMyod4GF9kB3",
+ "jQVRlCyHOyD9VZTpz6mCJ4/J+d9Ov3r0+NfHX31tSLKUYilpQeZbDYrcd7oZUXqbw4P+ylA7qnIdH/3r",
+ "p95Q2R43No4SlUyhoGV/KGsAtSKQbUZMuz7W2mjGVdcAjjmcF2A4uUU7sbZ9A9pLpoyEVczvZDOGEJY1",
+ "s2TEQZLBXmI6dHnNNNtwiXIrq7tQZUFKISP2NTxiWqQiT9YgFRMRb8ob14K4Fl68Lbu/W2jJFVXEzI2m",
+ "34qjQBGhLL3h4/m+Hfpiwxvc7OT8dr2R1bl5x+xLG/nekqhICTLRG04ymFfLlia0kKIglGTYEe/o70Gj",
+ "KHDBCjjXtCh/WizuRlUUOFBEZWMFKDMTsS2MXK8gFdxGQuzRztyoY9DTRYw30elhABxGzrc8RTvjXRzb",
+ "YcW1YBydHmrL00CLNTDmkC1bZHl7bXUIHXaqeyoCjkHHK/yMho6XkGv6nZAXjSXweymq8s6FvO6cY5dD",
+ "3WKcKSUzfb0Ozfgyb0ffLA3sR7E1fpYFvfDH160BoUeKfMWWKx2oFW+kEIu7hzE2SwxQ/GCVstz06atm",
+ "r0VmmImu1B2IYM1gDYczdBvyNToXlSaUcJEBbn6l4sLZQLwGOorRv61DeU+vrJ41B0NdKa3MaquSoPe2",
+ "d180HROa2hOaIGrUgO+qdjraVnY6GwuQS6DZlswBOBFz5yByritcJEXXs/bijRMNI/yiBVcpRQpKQZY4",
+ "w9Re0Hw7e3XoHXhCwBHgehaiBFlQeWtgL9d74byEbYKBEorc/+EX9eAzwKuFpvkexGKbGHprNd95AftQ",
+ "j5t+F8F1Jw/Jjkog/l4hWqA0m4OGIRQehJPB/etC1NvF26NlDRL9cX8oxftJbkdANah/ML3fFtqqHAj/",
+ "c+qtkfDMhnHKhResYoPlVOlkH1s2jVo6uFlBwAljnBgHHhC8XlGlrQ+Z8QxNX/Y6wXmsEGamGAZ4UA0x",
+ "I//iNZD+2Km5B7mqVK2OqKoshdSQxdbAYbNjrtewqecSi2DsWufRglQK9o08hKVgfIcsuxKLIKprV4sL",
+ "sugvDh0S5p7fRlHZAqJBxC5Azn2rALthCNQAIEw1iLaEw1SHcuq4q+lEaVGWhlvopOJ1vyE0ndvWp/rn",
+ "pm2fuKhu7u1MgMLIK9feQX5lMWuD31ZUEQcHKeilkT3QDGKd3X2YzWFMFOMpJLsoH1U80yo8AnsPaVUu",
+ "Jc0gySCn2/6gP9vPxH7eNQDueKPuCg2JjWKKb3pDyT5oZMfQAsdTMeGR4BeSmiNoVIGGQFzvPSNngGPH",
+ "mJOjo3v1UDhXdIv8eLhsu9WREfE2XAttdtzRA4LsOPoYgAfwUA99c1Rg56TRPbtT/DcoN0EtRxw+yRbU",
+ "0BKa8Q9awIAN1QWIB+elw947HDjKNgfZ2B4+MnRkBwy6b6jULGUl6jo/wPbOVb/uBFE3I8lAU5ZDRoIP",
+ "Vg0sw/7Ext90x7yZKjjK9tYHv2d8iywnZwpFnjbwl7BFnfuNDewMTB13octGRjX3E+UEAfXhYkYED5vA",
+ "hqY63xpBTa9gS65AAlHVvGBa24DttqqrRZmEA0T9GjtmdE48GxTpd2CMV/EchwqW19+K6cTqBLvhu+go",
+ "Bi10OF2gFCIfYSHrISMKwah4D1IKs+vMxY776GFPSS0gHdNGD259/d9TLTTjCsh/i4qklKPKVWmoZRoh",
+ "UVBAAdLMYESwek4X2dFgCHIowGqS+OXhw+7CHz50e84UWcCVf3BhGnbR8fAh2nHeCKVbh+sO7KHmuJ1F",
+ "rg90+JiLz2khXZ6yP7LAjTxmJ990Bq+9ROZMKeUI1yz/1gygczI3Y9Ye0si4qAocd5QvJxg6tm7c93NW",
+ "VDnVd+G1gjXNE7EGKVkGezm5m5gJ/u2a5j/V3fbodE0UGCsKyBjVkG9JKSEFG51vRDVVj31EbNxeuqJ8",
+ "iRK6FNXSBY7ZcZDDVsraQmTFe0NEpRi94QlalWMc1wUL+wcaRn4BanSorknaagxXtJ7PvckZcxX6nYuY",
+ "6KNeqelkUMU0SF03KqZFTvuVyQju2xKwAvw0E4/0XSDqjLDRx1e4LYZ6zeb+MTbyZugYlP2Jg1C25uNQ",
+ "NJvRb/PtHUgZdiAioZSg8E4I7ULKfhWL8EWZuzTUVmko+qZz2/XXgeP3dlBBEzxnHJJCcNhGH1EzDj/i",
+ "x+hxwntpoDNKCEN9u0J/C/4OWO15xlDjbfGLu909oV0XkfpOyLvyQdoBR8vTI1x+e/3bbsqbOiZpnkd8",
+ "ee69SZcBqGn9vp1JQpUSKUMh6SxTU3vQnPvPPU5po/9NHUV7B2evO27HaRU+ZUSjLOQloSTNGZpsBVda",
+ "Vql+zykahYKlRqKNvPY7bCZ84ZvE7ZIRs6Eb6j2nGGlWm4qiERILiNhFvgPw1kJVLZegdEe5WAC8564V",
+ "46TiTONchTkuiT0vJUgM+TmyLQu6JQtDE1qQ30EKMq90W9zG51RKszx3HjQzDRGL95xqkgNVmvzI+MUG",
+ "h/Nedn9kOegrIS9rLMRv9yVwUEwl8aio7+1XDFh1y1+54FV8/m4/W5+LGb95c7VFm1HzpPv/3f+vk3en",
+ "yT9o8vtx8ux/zT58fHr94GHvx8fX33zz/9s/Pbn+5sF//Wdspzzsscc+DvKzl04VPXuJ+kbjdOnB/skM",
+ "7gXjSZTIwvCJDm2R+/iw1RHQg7Y1Sq/gPdcbbghpTXOWGd5yE3Lo3jC9s2hPR4dqWhvRsT75tR4oxd+C",
+ "y5AIk+mwxhtLUf1AwvizOvQCupdyeF4WFbdb6aVv+2rEB3SJxbR+OmmzqpwQfFe3oj4a0f35+KuvJ9Pm",
+ "PVz9fTKduK8fIpTMsk3s1WMGm5hy5g4IHox7ipR0q0DHuQfCHo1ds8EU4bAFGK1erVj56TmF0mwe53A+",
+ "Ft8ZeTb8jNsgeXN+0Ke4da4Ksfj0cGsJkEGpV7FsCy1BDVs1uwnQifMopVgDnxJ2BEddI0tm9EUXRZcD",
+ "XeCrf9Q+xRhtqD4HltA8VQRYDxcyypIRox8UeRy3vp5O3OWv7lwdcgPH4OrOWTsQ/d9akHvff3tBZo5h",
+ "qnv2Aa4dOngyGVGl3augVgSQ4WY2x4wV8t7z9/wlLBhn5vvJe55RTWdzqliqZpUC+ZzmlKdwtBTkxD80",
+ "ekk1fc97ktZgGqjgiRcpq3nOUnIZKiQNedrUHv0R3r9/R/OleP/+Qy8Yoq8+uKmi/MVOkBhBWFQ6cYkJ",
+ "EglXVMacTap+mI4j28wju2a1QraorEXSJz5w48d5Hi1L1X2g2l9+WeZm+QEZKvf80mwZUVpIL4sYAcVC",
+ "g/v7WriLQdIrb1epFCjyW0HLd4zrDyR5Xx0fPwHSerH5m7vyDU1uSxhtXRl8QNs1quDCrVoJGy1pUtJl",
+ "zKf1/v07DbTE3Ud5uUAbR54T7NZ6Keoj4XGoZgEeH8MbYOE4+NUbLu7c9vJJqOJLwE+4hdjGiBuNp/2m",
+ "+xW8Hb3xdnXen/Z2qdKrxJzt6KqUIXG/M3VumqURsnz4g2JL1FZdGp85kHQF6aXLrwJFqbfTVncfYeME",
+ "Tc86mLKZd+zLL8z9gB6BOZCqzKgTxSnfdh/hK9Dax/G+hUvYXogmdcQhr+7bj8DV0EFFSg2kS0Os4bF1",
+ "Y3Q334VxoWJflv4tNT6q82RxUtOF7zN8kK3IeweHOEYUrUfKQ4igMoIIS/wDKLjBQs14tyL92PKMljG3",
+ "N18kC4/n/cQ1aZQnF3EVrgat7vZ7AZjGS1wpMqdGbhcuA5V96BxwsUrRJQxIyKFTZuRz4pYjBwfZd+9F",
+ "bzqx6F5ovfsmCrJtnJg1RykFzBdDKqjMdOLs/EzW7+c8E5hY0iFsnqOYVAckWqZDZcs5ZjPlDYEWJ2CQ",
+ "vBE4PBhtjISSzYoqnxwLc4j5szxKBvgDH+7vStdyFoSIBYnC6mQsnud2z2lPu3RJW3ymFp+eJVQtR6Ra",
+ "MRI+RqXHtkNwFIAyyGFpF24be0Jpkgg0G2Tg+GmxyBkHksSizQIzaHDNuDnAyMcPCbEWeDJ6hBgZB2Cj",
+ "PxsHJq9FeDb58hAguUuCQP3Y6AkP/ob4ey0bf21EHlEaFs4GvFqp5wDUhSjW91cnUBaHIYxPiWFza5ob",
+ "Nuc0vmaQXtYQFFs7OUJcRMWDIXF2hwPEXiwHrcleRTdZTSgzeaDjAt0OiOdik9gHm1GJd76ZG3qPhqTj",
+ "89HYwbT5We4pMhcbjNLBq8WGQO+BZRgOD0ag4W+YQnrFfkO3uQVm17S7pakYFSokGWfOq8llSJwYM/WA",
+ "BDNELveDlCs3AqBj7GjyFzvld6+S2hZP+pd5c6tNm1Ri/rVP7PgPHaHoLg3gr2+FqZOkvOlKLFE7RTvY",
+ "pJ0fJhAhY0Rv2ETfSdN3BSnIAZWCpCVEJZcxz6nRbQBvnHPfLTBeYBYayrcPgggmCUumNDRGdB8n8TnM",
+ "kxST3wmxGF6dLuXCrO+tEPU1Zd2I2LG1zE++AgwBXjCpdIIeiOgSTKPvFCrV35mmcVmpHSNlU8WyLM4b",
+ "cNpL2CYZy6s4vbp5f3hppn1ds0RVzZHfMm4DVuaY2jgaObljahtcu3PBr+yCX9E7W++402CamomlIZf2",
+ "HH+Sc9HhvLvYQYQAY8TR37VBlO5gkMGL1z53DOSmwMd/tMv62jtMmR97b9SOf3c7dEfZkaJrCQwGO1fB",
+ "0E1kxBKmg8zA/aeoA2eAliXLNh1bqB11UGOmBxk8fD61DhZwd91gezAQ2D1jr2EkqHbqvEbAtzmeW5lr",
+ "jkZh5qKd4C5kCOFUTPkKBX1E1a/l9uHqAmj+A2x/MW1xOZPr6eR2ptMYrt2Ie3D9pt7eKJ7RNW9NaS1P",
+ "yIEop2UpxZrmiTMwD5GmFGtHmtjc26M/MauLmzEvvj199caBfz2dpDlQmdSiwuCqsF35p1mVzdI3cEB8",
+ "BnSj83mZ3YqSwebXqcVCo/TVClwq6UAa7eW8bBwOwVF0RupFPEJor8nZ+UbsEnf4SKCsXSSN+c56SNpe",
+ "EbqmLPd2Mw/tQDQPLm5c4tQoVwgHuLV3JXCSJXfKbnqnO346Guraw5PCuXYkuy5sPndFBO+60DHmeVs6",
+ "r3tBMWOltYr0mROvCrQkJCpnadzGyufKEAe3vjPTmGDjAWHUjFixAVcsr1gwlmk2JidNB8hgjigyVTQt",
+ "ToO7uXC1eirO/lUBYRlwbT5JPJWdg4rpTZy1vX+dGtmhP5cb2From+FvI2OE2Vq7Nx4CsVvACD11PXBf",
+ "1iqzX2htkTI/BC6JAxz+4Yy9K3GHs97Rh6NmG7y4anvcwtI6ff5nCMPmWN9f18crry5t7MAc0To9TCUL",
+ "KX6HuJ6H6nHkoZHPT8swyuV3CB86hNUpWiymtu405Yaa2Qe3e0i6Ca1Q7SCFAarHnQ/ccpgo01uoKbdb",
+ "bctmtGLd4gQTRpXO7PgNwTiYe5G4Ob2a01gWUSNkGJhOGwdwy5auBfGdPe5V/drCzk4CX3LdltlH5CXI",
+ "5g1gPyHNDQUGO+1oUaGRDJBqQ5lgav1/uRKRYSp+RbmtvmL62aPkeiuwxi/T60pITAGh4mb/DFJW0Dwu",
+ "OWRp38SbsSWzhUUqBUHlCjeQLdpkqchV/6jfEDnUnC3I8TQon+N2I2Nrptg8B2zxyLaYU4WcvDZE1V3M",
+ "8oDrlcLmj0c0X1U8k5DplbKIVYLUQh2qN7Xzag76CoCTY2z36Bm5j247xdbwwGDR3c+Tk0fP0Ohq/ziO",
+ "XQCuMMwubpIhO/m7YydxOka/pR3DMG436lH0tbytDDfMuHacJtt1zFnClo7X7T9LBeV0CfFIkWIPTLYv",
+ "7iYa0jp44Zkta6S0FFvCdHx+0NTwp4Hoc8P+LBgkFUXBdOGcO0oUhp6ashR2Uj+crZHkMgp7uPxH9JGW",
+ "3kXUUSI/rdHU3m+xVaMn+zUtoI3WKaE270fOmugFn+ecnPm0Qphiuc6sbHFj5jJLRzEHgxkWpJSMa1Qs",
+ "Kr1I/krSFZU0NezvaAjcZP7100ha6XZ6U34Y4J8c7xIUyHUc9XKA7L0M4fqS+1zwpDAcJXvQvPYITuWg",
+ "MzfuthvyHe4eeqxQZkZJBsmtapEbDTj1rQiP7xjwlqRYr+cgejx4ZZ+cMisZJw9amR36+e0rJ2UUQsZy",
+ "BTbH3UkcErRksMbYvfgmmTFvuRcyH7ULt4H+83oevMgZiGX+LMcUgeciop36VOe1Jd3FqkesA0PH1Hww",
+ "ZDB3Q01JO630p+ejdxMFFfd0ecN237Flvng84B9dRHxmcsENbHz5diUDhBKk1Y+STFZ/D3zslDwXm7GE",
+ "0zmFnnj+DVAURUnF8uyX5uVnp2qBpDxdRX1mc9Px16a+Wr04ewdG0/6tKOeQR4ez8uavXi6NSM7/FGPn",
+ "KRgf2bZbSMEut7O4BvA2mB4oP6FBL9O5mSDEavtRXR20nS9FRnCeJsdcc1z7BTiCNOn/qkDp2AMl/GAD",
+ "x9A2atiBzdJNgGeokR6R720J5RWQVgIh1AR9poj2q+mqzAXNppjB4uLb01fEzmr72CpBNkv4EhWh9io6",
+ "NrEgfea4EGRf8Cf+PGL8OLvjtc2qlU7qpN6xB6imRZN2nHX8BKgihdg5Ii+DYqj2raoZwtDDgsnCaHX1",
+ "aFY+Qpow/9GapitU+1qsdZjkx6e391SpgpKSdWmoOqcknjsDt8twbxPcT4kwuvkVU7ZyLqyh/ea1fgDu",
+ "zA7+DWx7ebLi3FLK0QG3XJ1B8lC0e+DsFeldCVHIOog/UOi31SEOzfZ/jr2iKa66pQN6tSTtC8q65I+v",
+ "iJ5SLjhLMcFU7Ip2JXbH+NlG5OLqGnL9EXcnNHK4ogUL6lA8h8XBEgaeETrE9Q39wVezqZY67J8aa7mu",
+ "qCZL0MpxNsimvu6GszUyrsDlCMWCzAGfFLLlu0QOGXWHJ7Xb5EAywqc3A8rjd+bba2dawJj0S8ZRiXBo",
+ "c4KftQZiBVBtNA+myVKAcutpvz9W70yfI3yKm8Hmw5GvGIpjWNefWbb1c/eHOvVeb+dlNm1fmLYuQVL9",
+ "cyvK2U56WpZu0uGqLFF5QG/4IIIj3svEu48C5Nbjh6PtILed4Sp4nxpCgzU6u6HEe7hHGHWFkk71KyO0",
+ "WorCFsSGiUWzJDAeAeMV49DUs41cEGn0SsCNwfM60E+lkmorAo7iaRdAc/Rwxxia0s69cduhuumhDEpw",
+ "jX6O4W1siqsMMI66QSO4Ub6ty+ga6g6EiRdYv9shsl8qBaUqJ0Rl+GqhUzwlxjgM4/blmdoXQP8Y9GUi",
+ "211Lak/OITfR0EPUeZUtQSc0y2IpW5/jV4JfSVah5AAbSKs6tWdZkhTzrrQT0fSpzU2UCq6qYsdcvsEt",
+ "pwuqEUWoIayI5HcYH7rMt/hvLK/l8M64QI+DQw19VEd2WPalfuhkTOo1NJ0otkzGYwLvlNujo5n6ZoTe",
+ "9L9TSs/Fsg3IJ04/sYvLhXsU42/fmosjzM7QS9Zqr5Y6eQIG9glfQxLVxvrZb5sr4VXWy96KDqW6Rt1u",
+ "A8RwtbkpXn4D4b1B0g1q71froRwK8k0HY9Kpdq/jNCU7WdDgiyMbIWTfFiEUcevsUFSQDQoyn3u9x0mG",
+ "PTlbxxMfBgj14WZ9gH7wsaykpMy53xtm0cesi3rvv0MYEw/bbHB3ES6WfNBi98N6KO7bJ2PD791qVJfg",
+ "nsyXEtZMVN6x7SOfvEpof23Vdqoj76Pr7xtecarPaw4dNN5euKoAdplOJ//hFxsnR4Bruf03MOX2Nr1X",
+ "56ov7VrzVNOE1AmlRyWYbt2KYxIVxnLiOdmwVWlrT52wHlm9HCMO9Ot+TSdn2UEXZiyv4sSOEjt28Spe",
+ "w2mnmlRTeMRKoViT1z1W3mtkiOEFVugK0mb1x/LxPWtINSbzb+IWJMAhSbTMZEHB0C/ppwbU6ToS02Wd",
+ "2pVqqp/Bf88d33sNFrxotNnPj8YnVjqto9OQT2M25CVwV7Oz/c5jdLT5YgGpZus9r+/+vgIevOyaeruM",
+ "rb0dPMZjdfQyJm853OrYALTrcdxOeIIkircGZ+jtzSVs7ynSooZoOvapv2pvkrcDMYDcITEkIlQs+sMa",
+ "kp1DnqmaMhALPtrKdocmA9pgJafgLekN5/IkaS6O5n3pjinjpWRGzWW6HvTqGgNxhx7o9StRDOsfL7Hw",
+ "h6qrLPq8H6GWTs762RGvXN4QfCtZ+058BhFQ/jf/MNrOkrNLCGtNoafqisrMt4iaXrxVJ9lxH/Ve1fkq",
+ "Cl2gF/XMrImN7b+jiuTbwgjoNBdGjEiGwsjb4ah1LMc9ZYNubPp3DLQ1cC1Aupp8KP/mQkGihY+l3QXH",
+ "LlTYyKIbIUEN5ri0wA1mnnnbpNbBXL8UM81QF1AULpBIKKiBTgYJcIbn3IXsF/a7fzjkc73utTDV9Lq/",
+ "6ICPimaqh8SQ6hfE3Zb7HyTdxNjEOLd1n1UsGw4H2faGlFJkVWov6PBg1Aa50bmmdrCSqJ0m7a+yoyME",
+ "rzovYTuzSpCv1uB3MATaSk4W9CCLQmeT79T8pmJwL+8EvM9puZpOSiHyZMDZcdZP4dOl+EuWXkJGzE3h",
+ "owcHKt+Q+2hjr73ZV6utT1lTlsAhe3BEyCm38dresd3OId2ZnN/Tu+bf4KxZZbNqOaPa0XseD3zFfFfy",
+ "ltzMD7ObhykwrO6WU9lB9iSI2QykD5L0KlIH6misVt53NXdr8zREZaGIySRN2Zk9cTJ1iExT+aMJk+lL",
+ "B3kurhKkoqTO/xXTOUy7NpP0GU+bbgbbcwjibahyF+iWrGhGUiElpGGP+BMHC1QhJCS5wPCbmGdwoY08",
+ "VGBcMye5WBJRGjXXptHzPpRoWZpgLvvM1vZMrKNmIJEBKPes1k1jG/fn2VG95vDKOBeriL0FEe2xfHD5",
+ "G0coB1etCMAcQaD7bU2nseo+7XV160MNVWvTomBpHN1/riiTwdiQPbWLIuurydGVVvKvAgdwFXXZ7vaQ",
+ "2jp087F+0jpn8shjEQAw7DltwTDKf3ooGAus65jQCJLPaql12iq7yzpn3+ezszSeUqu1roCYsSsJ7pWa",
+ "LUDXqZxTUr3yt5hp3tctjZ4CCp+Q2fIfVFlLiLfIuOp3XfFAlEkOa2g5lN3TuSpNQSm2hrBynu1MMoAS",
+ "7ZNdqTnmKQ25XEeUcmtPAl/bGOxGZSuLWLtTZI/gFBXzNjyxx0SNPUoGojXLKtrCn7pFLbKhMmQRNuxh",
+ "HckpDmYS8cXtYhF7YxuQ5qPnksdDG8KXm7VRBGfLauOpJcLmZKuSXvFhJSJid6r97bdfB8HBiOq8pB68",
+ "8mW9KzdVIAcpYxdh9OoHRmUOBb7+a5j0xItbrm9ExrKmLqYiAzDVnGeM3oMmOixoVtAtydhiAdIa85Wm",
+ "PKMyC5szTlKQmjKj2WzVzcVaA62sYLpXsjXcFQf1DCYm46JdygKSb53KcAupEz03EYnTXrVaDJVI7O1K",
+ "/DkB3RjpGuOqBojAPYRG2doeMMFRQCIFvYQD51Hsd9g9DaYncbY/LXDWMVPEfK03zK02inX3wxAit1tQ",
+ "DHG3ZyhMvdi86ZI2mgUtyf6C7NL4j83FOa4so++wB7zQYRgUZvS2GwfOZ34c9WONlGApH4YoobX8fT5I",
+ "t8BG0gi2yDECrcEmwrUB9e19CRzM6kXttx2qIdp172KeRcFtkb+eW9jyJlu1LyAccxbkmuaf3rWLCThP",
+ "ER+QvR02Boe+wRDJFpXqZi8TXtFRcwd+wLubmr9BV/TfwexRVCt1QzkRphbrfTAP3iw0t4aLhS/htQZO",
+ "rnBMG8f26Gsydy+3SwkpU13R6MpX16hdYVhsyr0G2eg9vrd96/xF6FuQ8cJrGuR1k6kfdfwlbyBsjuhn",
+ "ZioDJzdK5THq65FFBH8xHhWmUNtzXVy2Atxs5ZPOyw0h4Y4D3YKQ9QMD3frJ4cYuzwZzmUunUtBf5+jb",
+ "uoXbyEXdrG1slGYfubvSuY8JroxXaTDdMbrTIgRLnBAElfz26DciYYE1DAV5+BAnePhw6pr+9rj92Rzn",
+ "hw+j0tkni+u0OHJjuHljFPPL0Es/+5pt4FFpZz8qlmf7CKP1RLipAoqPYH91iQg+Sx3SX22sSf+oulpw",
+ "twiQs4iJrLU1eTBV8Ph3xLtf1y3yyhf9OGklmd5ifkRvP2C/RiNQv6+jmVw0XK0furtPi0uoM2w2sU+V",
+ "8rfr94LmeB9ZtZWbW0jkR+TbDS3KHNxB+ebe/C/w5K9Ps+Mnj/4y/+vxV8cpPP3q2fExffaUPnr25BE8",
+ "/utXT4/h0eLrZ/PH2eOnj+dPHz/9+qtn6ZOnj+ZPv372l3uGDxmQLaATn41n8n+xWG9y+uYsuTDANjih",
+ "JfsBtrYuoCFjX3GQpngSoaAsn5z4n/63P2FHqSia4f2vE5fsY7LSulQns9nV1dVR2GW2xGCHRIsqXc38",
+ "PL2ShKdvzmovkbUC4Y7ad7LeuudJ4RS/vf32/IKcvjk7CurVn0yOj46PHmF58xI4LdnkZPIEf8LTs8J9",
+ "nzlim5x8vJ5OZiugOcYGmj8K0JKl/pMEmm3d/9UVXS5BHrkyjOan9eOZFytmH13Qx/Wub7OwosnsYys2",
+ "JtvTEysezD76RH67W7cy5bmYoKDDSCh2NZvNMT/I2KaggsbDS0FlQ80+org8+PvMJTSIf0S1xZ6HmQ8g",
+ "i7dsYemj3hhYOz1SqtNVVc4+4n+QPgOw7POhmd7wGdo+Zh9bq3Gfe6tp/950D1usC5GBB1gsFjYx6a7P",
+ "s4/232Ai2JQgmRH8MGTP/WpDq2e2GH7/5y1Poz/219GrCha1I721uQwoVq6O1yaY4Hm1R/0sQw6suyGu",
+ "tsSItT3iMX58fHxQtdRxATPdwNr+ndZnXrtWdj2dPD0Q0J3Wn9ZzpAgwz2lGvJMe53706eY+4xgna7gy",
+ "sbcOQvD000HQrufyA2zJa6HJd6geXU8nX33KnTjjRlijOcGWQbrG/hH5mV9yccV9SyOuVEVB5Xb08dF0",
+ "qTCYQ7I1dcJiUOJr8gGjh2zgRvuonWZZj+it2AZKPxd4/w1hrFDL0j0+bpDWSK2MmyX01d5+zfQVRGLU",
+ "bSSl9z5ykcEklCe1rOD6ljyhLbgbEM4iVhw0R2LRrYVPsBqAGg247sbY2JFHVY3uDF4XyanmBVNeXfjC",
+ "U77wFGmnf/Lppj8HuWYpkAsoSiGpZPmW/Mzr1DE35nGnWRZ9pdI++nt53HSySVKRwRJ44hhYMhfZ1qfg",
+ "bk1wCVZB7Qkys4/tOjpWpJtkkIOORuCb3+sK5/1FzLfk7GVPwrHdupz3+RabBvVpTt59tBqeUV8aBawL",
+ "Yo8zhqVRurzpQ5xr7iJ7s5Cl0MRiIXOL+sKIvjCiWwk3ow/PGPkmqn3YxGy0d2dPfY61WAZPqvugjNFR",
+ "PuvxvZON7+s/MX3HvvaBjAQfbBBKF81fWMQXFnE7FvE9RA4jnlrHNCJEd5g+NJZhYARf1q1WiU4O37zK",
+ "qQxij/aZOU5xRGfc+BRc41MrdVFcWZ2O8qagb2QD71bP+8LyvrC8Pw/LO93PaNqCya01o0vYFrSs9SG1",
+ "qnQmrgI/B8JiY5D6duC6fn7r79kVZTpZCOnejmM1l35nDTSfuUSRnV+b3Ey9L5hwKvgxjIGO/jqri2VF",
+ "P3ZdJLGvzkUw0MiHZPrPjbs0dD8ia68dj+8+GLaMpRgc12+8aSezGb7HXAmlZ5Pr6ceOpy38+KEmgY/1",
+ "XeFI4frD9f8EAAD//4XxG93x1QAA",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/daemon/algod/api/server/v2/generated/participating/public/routes.go b/daemon/algod/api/server/v2/generated/participating/public/routes.go
index f197fd7aa..ade11ffe9 100644
--- a/daemon/algod/api/server/v2/generated/participating/public/routes.go
+++ b/daemon/algod/api/server/v2/generated/participating/public/routes.go
@@ -177,181 +177,192 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/+x9/XPcNpLov4I3d1W2dUNJ/kh2rarUPdlOsrrYjstSsrtn+WUxZM8MViTABUDNTPz8",
- "v1+hAZAgCc5wJMXe3PonW0N8NBqNRn/jwyQVRSk4cK0mJx8mJZW0AA0S/6JpKiquE5aZvzJQqWSlZoJP",
- "Tvw3orRkfDGZTpj5taR6OZlOOC2gaWP6TycS/lExCdnkRMsKphOVLqGgZmC9KU3reqR1shCJG+LUDnH2",
- "YvJxyweaZRKU6kP5I883hPE0rzIgWlKuaGo+KbJiekn0kiniOhPGieBAxJzoZasxmTPIM3XoF/mPCuQm",
- "WKWbfHhJHxsQEyly6MP5XBQzxsFDBTVQ9YYQLUgGc2y0pJqYGQysvqEWRAGV6ZLMhdwBqgUihBd4VUxO",
- "3k0U8Awk7lYK7Br/O5cAv0KiqVyAnryfxhY31yATzYrI0s4c9iWoKteKYFtc44JdAyem1yF5VSlNZkAo",
- "J2+/e04eP3781CykoFpD5ohscFXN7OGabPfJySSjGvznPq3RfCEk5VlSt3/73XOc/9wtcGwrqhTED8up",
- "+ULOXgwtwHeMkBDjGha4Dy3qNz0ih6L5eQZzIWHkntjGd7op4fyfdVdSqtNlKRjXkX0h+JXYz1EeFnTf",
- "xsNqAFrtS4MpaQZ9d5w8ff/h4fTh8cd/e3ea/Lf786vHH0cu/3k97g4MRBumlZTA002ykEDxtCwp7+Pj",
- "raMHtRRVnpElvcbNpwWyeteXmL6WdV7TvDJ0wlIpTvOFUIQ6MspgTqtcEz8xqXhu2JQZzVE7YYqUUlyz",
- "DLKp4b6rJUuXJKXKDoHtyIrluaHBSkE2RGvx1W05TB9DlBi4boQPXNA/LzKade3ABKyRGyRpLhQkWuy4",
- "nvyNQ3lGwguluavUfpcVuVgCwcnNB3vZIu64oek83xCN+5oRqggl/mqaEjYnG1GRFW5Ozq6wv1uNwVpB",
- "DNJwc1r3qDm8Q+jrISOCvJkQOVCOyPPnro8yPmeLSoIiqyXopbvzJKhScAVEzP4OqTbb/l/nP74mQpJX",
- "oBRdwBuaXhHgqcggOyRnc8KFDkjD0RLi0PQcWoeDK3bJ/10JQxOFWpQ0vYrf6DkrWGRVr+iaFVVBeFXM",
- "QJot9VeIFkSCriQfAsiOuIMUC7ruT3ohK57i/jfTtmQ5Q21MlTndIMIKuv7meOrAUYTmOSmBZ4wviF7z",
- "QTnOzL0bvESKimcjxBxt9jS4WFUJKZszyEg9yhZI3DS74GF8P3ga4SsAxw8yCE49yw5wOKwjNGNOt/lC",
- "SrqAgGQOyU+OueFXLa6A14ROZhv8VEq4ZqJSdacBGHHq7RI4FxqSUsKcRWjs3KHDMBjbxnHgwslAqeCa",
- "Mg6ZYc4ItNBgmdUgTMGE2/Wd/i0+owq+fjJ0xzdfR+7+XHR3feuOj9ptbJTYIxm5Os1Xd2DjklWr/wj9",
- "MJxbsUVif+5tJFtcmNtmznK8if5u9s+joVLIBFqI8HeTYgtOdSXh5JIfmL9IQs415RmVmfmlsD+9qnLN",
- "ztnC/JTbn16KBUvP2WIAmTWsUYULuxX2HzNenB3rdVSveCnEVVWGC0pbiutsQ85eDG2yHXNfwjyttd1Q",
- "8bhYe2Vk3x56XW/kAJCDuCupaXgFGwkGWprO8Z/1HOmJzuWv5p+yzE1vXc5jqDV07K5kNB84s8JpWeYs",
- "pQaJb91n89UwAbCKBG1aHOGFevIhALGUogSpmR2UlmWSi5TmidJU40j/LmE+OZn821Fjfzmy3dVRMPlL",
- "0+scOxmR1YpBCS3LPcZ4Y0QftYVZGAaNn5BNWLaHQhPjdhMNKTHDgnO4plwfNipLix/UB/idm6nBt5V2",
- "LL47KtggwoltOANlJWDb8J4iAeoJopUgWlEgXeRiVv9w/7QsGwzi99OytPhA6REYCmawZkqrB7h82pyk",
- "cJ6zF4fk+3BsFMUFzzfmcrCihrkb5u7WcrdYbVtya2hGvKcIbqeQh2ZrPBqMmH8XFIdqxVLkRurZSSum",
- "8Z9c25DMzO+jOv8+SCzE7TBxoaLlMGd1HPwlUG7udyinTzjO3HNITrt9b0Y2ZpQ4wdyIVrbupx13Cx5r",
- "FK4kLS2A7ou9SxlHJc02srDekpuOZHRRmIMzHNAaQnXjs7bzPEQhQVLowPAsF+nVn6ha3sGZn/mx+scP",
- "pyFLoBlIsqRqeTiJSRnh8WpGG3PETENU8MksmOqwXuJdLW/H0jKqabA0B29cLLGox37I9EBGdJcf8T80",
- "J+azOduG9dthD8kFMjBlj7NzMmRG27cKgp3JNEArhCCFVfCJ0br3gvJ5M3l8n0bt0bfWpuB2yC0Cd0is",
- "7/wYPBPrGAzPxLp3BMQa1F3QhxkHxUgNhRoB3wsHmcD9d+ijUtJNH8k49hgkmwUa0VXhaeDhjW9maYyz",
- "pzMhb8Z9OmyFk8bkTKgZNWC+0w6SsGlVJo4UI2Yr26AzUOPl2840usPHMNbCwrmmvwEWlBn1LrDQHuiu",
- "sSCKkuVwB6S/jDL9GVXw+BE5/9PpVw8f/fLoq68NSZZSLCQtyGyjQZH7TjcjSm9yeNBfGWpHVa7jo3/9",
- "xBsq2+PGxlGikikUtOwPZQ2gVgSyzYhp18daG8246hrAMYfzAgwnt2gn1rZvQHvBlJGwitmdbMYQwrJm",
- "low4SDLYSUz7Lq+ZZhMuUW5kdReqLEgpZMS+hkdMi1TkyTVIxUTEm/LGtSCuhRdvy+7vFlqyooqYudH0",
- "W3EUKCKUpdd8PN+3Q1+seYObrZzfrjeyOjfvmH1pI99bEhUpQSZ6zUkGs2rR0oTmUhSEkgw74h39Pejz",
- "DU/RqnYXRDqsphWMo4lfbXga6Gxmo3LIFq1NuL1u1sWKt8/Zqe6pCDgGHS/xM6r1LyDX9M7ll+4EMdif",
- "+420wJLMNEQt+CVbLHUgYL6RQszvHsbYLDFA8YMVz3PTpy+kvxYZmMVW6g4u42awhtbNnoYUTmei0oQS",
- "LjJAi0ql4tf0gOceXYbo6dThza+XVuKegSGklFZmtVVJ0I/X4xxNx4SmlnoTRI0a8GLU7ifbyk5nvcK5",
- "BJoZrR44ETPnKnBODFwkRSek9hedExIiZ6kFVylFCkpBljgTxU7QfDvLRPQWPCHgCHA9C1GCzKm8NbBX",
- "1zvhvIJNgi5zRe7/8LN68Bng1ULTfAdisU0MvbXC5/xBfajHTb+N4LqTh2RHJRDPc412aRhEDhqGULgX",
- "Tgb3rwtRbxdvj5ZrkOiZ+U0p3k9yOwKqQf2N6f220FblQCCYU3QuWIF2O065UJAKnqnoYDlVOtnFlk2j",
- "ljZmVhBwwhgnxoEHhJKXVGnrTWQ8QyOIvU5wHiugmCmGAR4USM3IP3tZtD92au5BripVC6aqKkshNWSx",
- "NXBYb5nrNazrucQ8GLuWfrUglYJdIw9hKRjfIcuuxCKI6tro7tzt/cWhadrc85soKltANIjYBsi5bxVg",
- "NwyGGQCEqQbRlnCY6lBOHYEznSgtytJwC51UvO43hKZz2/pU/9S07RMX1c29nQlQGIPj2jvIVxazNgxq",
- "SY0KjSOTgl4Z2QMVYuv27MNsDmOiGE8h2Ub55liem1bhEdh5SKtyIWkGSQY53fQH/cl+JvbztgFwxxvF",
- "R2hIbDxLfNMbSvbhA1uGFjieigmPBL+Q1BxBo3k0BOJ67xg5Axw7xpwcHd2rh8K5olvkx8Nl262OjIi3",
- "4bXQZsctOSDEjqGPgXcADfXIN8cEdk4ataw7xV9BuQlqMWL/STaghpbQjL/XAgaMaS5SODguHe7eYcBR",
- "rjnIxXawkaETO2DZe0OlZikrUdX5ATZ3rvl1J4j6m0gGmrIcMhJ8sFpgGfYnNhCjO+bNNMFRRpg++D0r",
- "TGQ5OVMo8bSBv4INqtxvbITfRRAXeAeqbGRUcz1RThBQHzdkJPCwCaxpqvONkdP0EjZkBRKIqmYF09pG",
- "7rY1XS3KJBwgauDeMqPz5tjoOL8DY9xL5zhUsLz+VkwnViXYDt9FRy9oocOpAqUQ+QjjUQ8ZUQhGOf5J",
- "KcyuMxdE7MNIPSW1gHRMG1159e1/T7XQjCsgfxUVSSlHjavSUIs0QqKcgPKjmcFIYPWczsXfYAhyKMAq",
- "kvjl4KC78IMDt+dMkTmsfOS9adhFx8EBmnHeCKVbh+sOTIXmuJ1Frg+0/OO954IXOjxlt4vZjTxmJ990",
- "Bq/dBeZMKeUI1yz/1gygczLXY9Ye0sg49zqOO8qoHwwdWzfu+zkrqpzqu3BfbJVHa32CFQVkjGrIN6SU",
- "kIKNrjYClrKwGNCIjbtKl5QvUK6Wolq4wB87DjLGSlkLhqx4b4io8KHXPFlIUZUxRumCPX2AvRE7gBrN",
- "J0AkdrZy/orW87mcijE3mEd4sDvfmzGHvArTyaBiaJB63SiGFjntLIE4FjDtIVFVmgJEQ4BjKle91E42",
- "ZJPf4gY0YkMlbQwUoamuaB5SHTmbE8o37TRJynJluCBTBNuZzk1c7dSuzeewzGlufbORpIrwpLQkvmDn",
- "G5R2UTHS74BEYqShPmWEBGiOlyHj38aG3wwdg7I/cRB01Xwcirsy+ne+uQMxyA5EJJQSFF5aod1K2a9i",
- "HuY+uVtNbZSGom/at11/GWA0bwcVSMFzxiEpBIdNNN2XcXiFH6OMAy/Ogc4owgz17WolLfg7YLXnGUON",
- "t8Uv7nbAi97UAYd3sPndcTtenTDrC62WkJeEkjRnaNMUXGlZpfqSU7SaBIctEpjh9cNhO9pz3yRuuIvY",
- "1dxQl5xiUE5tS4k6k+cQMRx8B+DNaapaLEB1+CeZA1xy14pxUnGmca7C7FdiN6wEidERh7ZlQTeGBaLZ",
- "71eQgswq3ebJmHmitGGX1sVkpiFifsmpJjkYnfoV4xdrHM67aD3NcNArIa9qLMSvkAVwUEwl8QCS7+1X",
- "jO1zy1+6OD/MFLafrVPCjN+kp2zQqNJkv/6/+/958u40+W+a/HqcPP2Po/cfnnx8cND78dHHb775/+2f",
- "Hn/85sF//ntspzzssbwIB/nZC6esnb1AibzxSvRg/2QW6YLxJEpkoe+9Q1vkPuYAOgJ60LbX6CVccr3m",
- "hpCuac4yI3LdhBy6LK53Fu3p6FBNayM69hm/1j3l3FtwGRJhMh3WeONrvB9zFc9AQjeZSyrC8zKvuN1K",
- "L+jaAHsf+yLm0zrLzBagOCGYgrSkPnDL/fnoq68n0yZ1qP4+mU7c1/cRSmbZOiodwjqmvrgDggfjniIl",
- "3SgYEEAR9miYj402CIctwOi9asnKT88plGazOIfzYcvODLLmZ9zGE5vzg063jbPli/mnh1tLI4eXehlL",
- "TG9JCtiq2U2ATiBEKcU18Clhh3DYNUNkRjVzAUc50DkmSKOiJ8akYdTnwBKap4oA6+FCRun6MfpB4dZx",
- "64/Tibv81Z3L427gGFzdOWsPm/9bC3Lv+28vyJFjmOqezVW0QwfZZRGt1SVQtEJkDDez5ThssuYlv+Qv",
- "YM44M99PLnlGNT2aUcVSdVQpkM9oTnkKhwtBTnxOxguq6SXvSVqDFXOCbBhSVrOcpeQqlIgb8rRVEPoj",
- "XF6+o/lCXF6+70UL9OVXN1WUv9gJkhXTS1HpxOVwJxJWVMa8MarO4cWRbZGGbbNOiRvbsmKXI+7Gj/M8",
- "Wpaqm8vXX35Z5mb5ARkql6lmtowoLaSXRYyAYqHB/X0t3MUg6cqbMCoFivytoOU7xvV7klxWx8ePgbSS",
- "2/7mrnxDk5sSRhsyBnMNu/YLXLjVa2CtJU1Kuoh5fS4v32mgJe4+yssFKtl5TrBbK6nOBw3jUM0CPD6G",
- "N8DCsXeCEC7u3Pby9XriS8BPuIXYxogbjSv6pvsVpNndeLs6qXq9Xar0MjFnO7oqZUjc70xdxmNhhCwf",
- "H6DYAmMwXcWTGZB0CemVK0UBRak301Z3H4LiBE3POpiyRUpskgymyaPNfAakKjPqRPGuBWm2IQq09kGg",
- "b+EKNheiybLfJ0G5nS+rhg4qUmogXRpiDY+tG6O7+S7OCU1cZenTTjH/yJPFSU0Xvs/wQbYi7x0c4hhR",
- "tPI5hxBBZQQRlvgHUHCDhZrxbkX6seUZLWNmb75IwRLP+4lr0ihPLiQpXA0auO33ArDikVgpMqNGbheu",
- "WI/NCQ24WKXoAgYk5NBtMTLzsuXqwEF23XvRm07Muxda776JgmwbJ2bNUUoB88WQCioznUA0P5P1jDkn",
- "ANbgcwib5Sgm1RF7lulQ2XIf2aJiQ6DFCRgkbwQOD0YbI6Fks6TK1xHCckv+LI+SAX7DHOdtlS1Cg35Q",
- "U6m2r3ue2z2nPe3S1bfwRS18JYtQtRxRlcJI+Bi2HdsOwVEAyiCHhV24bewJpcm3bjbIwPHjfJ4zDiSJ",
- "hWNRpUTKbCGo5ppxc4CRjw8IsSZgMnqEGBkHYKPHFwcmr0V4NvliHyC5yxenfmz0FQd/Qzy1xQYoG5FH",
- "lIaFswEHUuo5AHUxfPX91YkkxWEI41Ni2Nw1zQ2bcxpfM0ivwAKKrZ1yCi7m4MGQOLvFAm8vlr3WZK+i",
- "m6wmlJk80HGBbgvEM7FObG5bVOKdrWeG3qMx25hpFzuYtpTFPUVmYo1xLHi12BjhHbAMw+HBCDT8NVNI",
- "r9hv6Da3wGybdrs0FaNChSTjzHk1uQyJE2OmHpBghsjlflCd4kYAdIwdTalXp/zuVFLb4kn/Mm9utWlT",
- "dcmnw8SO/9ARiu7SAP76Vpi6nsSbrsQStVO0wzHapTQCETJG9IZN9J00fVeQghxQKUhaQlRyFXPdGd0G",
- "8MY5990C4wUW7KB88yCI8ZGwYEpDY0T3IQmfwzxJsU6YEPPh1elSzs363gpRX1O2EA12bC3zk68AY2Tn",
- "TCqdoAciugTT6DuFSvV3pmlcVmpHEdmqmiyL8wac9go2ScbyKk6vbt4fXphpX9csUVUz5LeM29iQGVaB",
- "jcYWbpnahp9uXfBLu+CX9M7WO+40mKZmYmnIpT3H7+RcdDjvNnYQIcAYcfR3bRClWxhkkBLa546B3GQP",
- "J6aEHm6zvvYOU+bH3hk24hNTh+4oO1J0LYHBYOsqGLqJjFjCdFBEtZ+rOXAGaFmybN2xhdpRBzVmupfB",
- "w5ee6mABd9cNtgMDgd0zli4iQbWrjDUCvi2H2yrycTgKMxftWmAhQwinYsoXc+8jqk4n24WrC6D5D7D5",
- "2bTF5Uw+Tie3M53GcO1G3IHrN/X2RvGMrnlrSmt5QvZEOS1LKa5pnjgD8xBpSnHtSBObe3v0J2Z1cTPm",
- "xbenL9848D9OJ2kOVCa1qDC4KmxX/m5WZQuaDRwQXyza6HxeZreiZLD5dRWm0Ci9WoKruhtIo73ygI3D",
- "ITiKzkg9j0cI7TQ5O9+IXeIWHwmUtYukMd9ZD0nbK0KvKcu93cxDOxDNg4sbV2MyyhXCAW7tXQmcZMmd",
- "spve6Y6fjoa6dvCkcK4tdYELW/paEcG7LnQML96UzuteUCzuZ60ifebEqwItCYnKWRq3sfKZMsTBre/M",
- "NCbYeEAYNSNWbMAVyysWjGWaqRGKbgfIYI4oMn2hyCHczYR71qTi7B8VEJYB1+aTxFPZOahYTdFZ2/vX",
- "qZEd+nO5ga2Fvhn+NjJGWNiye+MhENsFjNBT1wP3Ra0y+4XWFikMt25cEns4/MMZe1fiFme9ow9HzTZ4",
- "cdn2uIWvkPT5nyEMW4569xMoXnl1FTYH5og+acJUMpfiV4jreageR1JxfClPhlEuvwIfEXPeWHeal1ma",
- "2Qe3e0i6Ca1Q7SCFAarHnQ/cclhT0FuoKbdbbV8YaMW6xQkmjCo9suM3BONg7kXi5nQ1o7GCi0bIMDCd",
- "Ng7gli1dC+I7e9yrOrHBzk4CX3Ldltks6xJkkyXXr9hyQ4HBTjtaVGgkA6TaUCaYWv9frkRkmIqvKLcP",
- "VZh+9ii53gqs8cv0WgmJNRJU3OyfQcoKmsclhyztm3gztmD2DYZKQVDk3w1k37exVOQeSqjTdRxqzubk",
- "eBq8NOJ2I2PXTLFZDtjioW0xowo5eW2IqruY5QHXS4XNH41ovqx4JiHTS2URqwSphTpUb2rn1Qz0CoCT",
- "Y2z38Cm5j247xa7hgcGiu58nJw+fotHV/nEcuwDcGxrbuEmG7OTPjp3E6Rj9lnYMw7jdqIfRdHL7iNYw",
- "49pymmzXMWcJWzpet/ssFZTTBcQjRYodMNm+uJtoSOvghWf2BRilpdgQpuPzg6aGPw1Enxv2Z8EgqSgK",
- "pgvn3FGiMPTUVPC3k/rh7HMyrviqh8t/RB9p6V1EHSXy0xpN7f0WWzV6sl/TAtponRJqC2PkrIle8CWh",
- "yZmvu4PVaOsitBY3Zi6zdBRzMJhhTkrJuEbFotLz5I8kXVJJU8P+DofATWZfP4lU4G1XguT7Af7J8S5B",
- "gbyOo14OkL2XIVxfcp8LnhSGo2QPmmyP4FQOOnPjbrsh3+H2occKZWaUZJDcqha50YBT34rw+JYBb0mK",
- "9Xr2ose9V/bJKbOScfKgldmhn96+dFJGIWSsmF5z3J3EIUFLBtcYuxffJDPmLfdC5qN24TbQf17Pgxc5",
- "A7HMn+WYIvBMRLRTXxW6tqS7WPWIdWDomJoPhgxmbqgpaVfg/fROP2987jufzBcPK/7RBfYzbyki2a9g",
- "YBOD6uDR7czq74H/m5JnYj12UzsnxG/sPwFqoiipWJ793GRldoqvS8rTZdSfNTMdf2meiaoXZ++naM26",
- "JeUc8uhwVhb8xcuMEan272LsPAXjI9t268Hb5XYW1wDeBtMD5Sc06GU6NxOEWG0nvNUB1flCZATnaQqk",
- "Ndyz/45AUO35HxUoHUsewg82qAvtlkbftcWGCfAMtcVD8r19CXYJpFX+BrW0uoqAK31rDepVmQuaTbGQ",
- "w8W3py+JndX2sY+d2GLHC1RS2qvo2KuC2o/jwoP9uyXx1IXx42yPpTarVhqrUSlNizKWHGpaXPgGmIEa",
- "2vBRfQmxc0heBG862jxSM4ShhzmThdG46tGs7II0Yf6jNU2XqJK1WOowyY+v0u2pUgUv49Uv3NQFEfHc",
- "GbhdoW5bp3tKhNGbV0zZB0DhGtr5qHVytjMJ+PzU9vJkxbmllKjssa14wE3Q7oGzgRrezB+FrIP4PQVy",
- "W+R+36Ll59grWqCpWwG99ySezW6sXy7xDzunlAvOUiyPFLua3UuhY3xgIypJdY2s/oi7Exo5XNG663WY",
- "nMPiYCV2zwgd4vpG+OCr2VRLHfZPjU9SLqkmC9DKcTbIpv75AGcHZFyBK3CJ78oGfFLIll8ROWTUVZ3U",
- "Lo09yQjTYgYUu+/Mt9dO7cd48SvGUcB3aHOh6dZShw8ZaqMVME0WApRbTzs3WL0zfQ4xTTaD9ftD//Ch",
- "rQaDbjmzbOuD7g916j3SzgNs2j43bV2doPrnVgSynfS0LN2kw49LROUBveaDCI54FhPv2gmQW48fjraF",
- "3LaGkuB9aggNrtERDSXewz3CqB9a6DziY4RWS1HYgtgQrmgFA8YjYLxkHJpnOSMXRBq9EnBj8LwO9FOp",
- "pNqKgKN42gXQHL3PMYamtHM93Haobi0hgxJco59jeBubNyIGGEfdoBHcKN/Ur4Ea6g6Eief4DLFDZP/F",
- "B5SqnBCVYUZB5w2IGOMwjNu/MtO+APrHoC8T2e5aUnty9rmJhpJEZ1W2AJ3QLItVpHqGXwl+9cWlYA1p",
- "VRemLEuSYk2UdpGYPrW5iVLBVVVsmcs3uOV0waMqEWoIH3bxO4xJKLMN/huryji8My4IY+8wQB9x4V6h",
- "2FNubo/Uk3oNTSeKLZLxmMA75fboaKa+GaE3/e+U0nOxaAPyiUtDbONy4R7F+Nu35uIIKyf0So3aq6Uu",
- "bIBBd8I/hYdqY52S2+ZKeJX1ao+is6d+amu7AWL40awpXn4DobdBQQxq71frPRwKwE0H48WpdplrmpKt",
- "LGgwG8hG79i8H4QibjkditixATvmc6/3OMmwJ2fj2FsR6kPB+gD94ONMSUmZc403zKKPWReRPmwu3Hbo",
- "mg3uLsLFeQ9a7H64HorJJorxRQ4Ev3efGboCl85evzNv1+qjkrxKaH91z7za8eqo+Oj6+9EJONXnNYMO",
- "Gm0vXEl7u0ynk//ws41hI8C13PwTmHB7m957pKkv7VrzVNOE1OWQR5VHbt2K8feWhusfNTWPkJ5KoVhT",
- "gjv2ENPIWLcLfEspqN/UH8sHmlxDqrHueuNAlwD7VHMykwWP/H2pgzSgO9Yhga780baaR/1i6zsutF5a",
- "UpBaZwtVH46v8HNah0khU8IKuAvg7p29dsLB6LDn+RxSza53pIH9eQk8SDGaeiOEfS83yApjdRgtVhHZ",
- "38TWALQtS2srPEE1v1uDM5QEcgWbe4q0qCFaOXvq75WbFJBADCB3SAyJCBULQ7BWU+cZZqqmDMSCD/ux",
- "3aEpxTX45k6Q1HjDuTxJmhu3SXTcMmX80Y9Rc5mue6X/YkToUKZY/9GAYWH7Bb7RoOr38HwBilAlJWf9",
- "Mn0rV8ACk/ZqR4EvZQHK/+YzdO0sObuC8FUgdMusqMx8i6idwZswki33US+9yxe87wI9r2dmTZBmP6En",
- "UvgJQ3HTXBj5KxmKZ27HRYaP52P0hy35jRGfBq45SPd6Ggp7uVCQaOGDOrfBsQ0V7qH3myBBDRZbtMAN",
- "lkB529R4waKzFEueUBfZEi6QSCiogU4GlViG59yG7Of2u89g8UVHd5pTanrdXWjeh+cy1UNiSPVz4m7L",
- "3ZkxN7GsMM7tW60qVpaFG1SGpv9SiqxK7QUdHoza+jS66NEWVhI1SqT9Vfb0yxxLgL0M8gyvYHNkRX9f",
- "qt9vZQi9FaHsGoK8/s5u36nRKa5f5wu7gMWdwPk5DTfTSSlEngzY+s/61WW6Z+CKpVeQEXN3+MC2gWdL",
- "yH00MdfO3NVy46uplCVwyB4cEnLKbSix9+u2yxt3Juf39Lb51zhrVtmCT86mdHjJ4zGZWIpJ3pK/+WG2",
- "czUFhvndcio7yI7aJeuByjaSriKP+ByOVUr7ntbuwyoNUVkoYlLKjicsIl5k/yaCf2HDZ6xoUbC0/4pC",
- "T5SY42tUCY0MflYz8GnrrUDWebjD1xiyzzSk1ApwRnmgLK8kuMwB+2xOp5x+SfXSb59p3hezzJUNCsP6",
- "bUl2qqxS4JUT92ZP91yIMsnhGlqOBJfOUKUpKMWuIXzvx3YmGUCJqnr3AolZyEO66vAQt/YksLGOwW6U",
- "qVjE2p0iOzjGwGPsiSUPNZaEDETXLKtoC3/qFk+xjHzbPYR15AnZ+3DEF9c7Gu65lKQu5hYzZLp0Er+F",
- "hn6bp106AlLwBEs95sBblDUWbiOMDKI2jtmbFdEYRQ99m3bkyATPrmy3vIQ1dprgXWldI6ip+VPX3dJX",
- "zWkc9wCM77ADvNAgFzwB4yUhB85njrB9VSMlWMogJbSWv8vG5xbYsK9giyzvNsu0Fc9sdFZ7XwIDrnpe",
- "20WH3mXqmk+xoI7gWGSsb3ZV6CrDWuUh4RjeLa9p/ulNp1hp6RTx4d65jS80tL2FSLaoVDcLc3tJR80d",
- "2Nnubmr+Bk29fwazR1EfpxvK+TxqWcF7hpBl0pzkonnhDockKxzTOkUffk1mLkWnlJAyxTrZiytfRrk2",
- "NeGrAs3zx9ttW7vW+bPQtyDjuRdfyOumJKsWeGM0EDZH9DMzlYGTG6XyGPX1yCKCvxiPCmtl7Lgurlre",
- "UlviuhMGKCTcsdc0iH/a02varwIydnnWM2gunUpBf52jb+sWbiMXdbO2sS7/PnK31e0c46mPl+M13TFU",
- "wCIEa1kTBJX87eHfiIQ5PlYjyMEBTnBwMHVN//ao/dkc54OD+DPLnypIwOLIjeHmjVHMz0Nh4zY0eiBD",
- "obMfFcuzXYTRyjdpnnvCjIpfXMbZZ3lw6hfry+kfVffoxz7hSd1NQMRE1tqaPJgqyCQZkUTiukVSRtAq",
- "klaS6Q0WwvGmf/ZLNJzh+9pb6LzNdekEd/dpcQV1KaXGt1gpf7t+L2iO95GRqTE4TOPTut+uaVHm4A7K",
- "N/dmf4DHf3ySHT9++IfZH4+/Ok7hyVdPj4/p0yf04dPHD+HRH796cgwP518/nT3KHj15NHvy6MnXXz1N",
- "Hz95OHvy9dM/3DN8yIBsAZ34tOvJX/BVtuT0zVlyYYBtcEJLVr+obcjYPy1DUzyJUFCWT078T//Xn7DD",
- "VBTN8P7XicvqnCy1LtXJ0dFqtToMuxwt0JmQaFGlyyM/T/8l4zdndWaOVS1xR23ShTcZeFI4xW9vvz2/",
- "IKdvzg6DlzJPJseHx4cP8SHFEjgt2eRk8hh/wtOzxH0/csQ2OfnwcTo5WgLN0fdu/ihAS5b6T2pFFwuQ",
- "h+6NHfPT9aMjL0ocfXCOlI/bvh2F5aqPPrT8TdmOnljO9uiDr9KyvXWrDIrzs5nlLmK1i76H4CHnoJZ+",
- "y84/23hX0ZQofC/e/FRKJsxJmpprMYNUAkW6FxIzY5onoZ3+Ahz/++r0L+jpe3X6F/INOZ66hCmFqkZs",
- "emtLrUngLLNgR54sf7Y5rT2XQQ3Hk3exV8Zjb//gETL0EVB4PWLDwbSsIKwt2PBjw2OPk6fvP3z1x48x",
- "Oa//5qRH0sCT4lr4SiaItIKuvxlC2doZ1My4/6hAbppFFHQ9CQHu+38jz0/O2aKSnWd1Ow/2EqbIf53/",
- "+JoISZxe+4amV3XslAEZC3RIcc0wrSQLcpFMzyGI3ZUXAu2L9rtknUItynZke43m91j9AAHFg/7o+PjL",
- "2/j/Gm/jT1tb62nky+7+79jdvrxASmHONMPUvubK8ddZC8im+rMDd8CFeEj+KiqU6uz7LhArt4YzoDHa",
- "z+liIIJQu8YTgl8ODroLPzhwe84UmcMKmSzl2LCLjoMDfBDwyZ6sbKsFuRUfP+rs7DNcb7Ne0XVd5YoS",
- "LnjC8fmRayCBKvjk+OHvdoVnHAPwjDhKrLj9cTr56ne8ZWfcCDY0J9jSrubx73Y15yCvWQrkAopSSCpZ",
- "viE/8Tp9OiiZ1md/P/ErLlbcI8JoklVRULlxQjSteU7Fg4T2rfynF7vQCNrIRelCoZsbRdRJ65ktvpi8",
- "/+h1gJGKxbZmRzOs5zK2Kaig8bB2gj4DdfQBrd6Dvx+5Ihfxj+h9sGrtkY+zjLdsKT4f9NrA2umRUp0u",
- "q/LoA/4H1cwALJtS1gcX1iVIZng5RpS6X21JiCP7an3/5w1Poz/2h+++nhb7+ehDu3p/C81qWelMrIK+",
- "aG23rqL+fPV7Vq2/j1aUaSM3uBBarK7Y76yB5kcuObzza5OP1fuCSWbBjx1JoxS2fkdbyXtLVxctX7K0",
- "BT2eiWyzhQetkxnjeDBDxtHY0OzHvtbQf6p7CbYosXdDRsQyLchMCpqlVGHRPldGoacufrylStKRJtdn",
- "EScTgokaeD8a0xyxw52eBxx3z1fMg1q3KP8q5V8j/y1llR5Ez2hGfMGXhLyiudlwyMipk4hb2Pit5YzP",
- "Lxh85pv8k129z/zhU4RivFlLZ5KRmB8XGeUO6ph71ihWhgEsgCeOBSUzkW186WZJV3pto9O6zO2orsEd",
- "/XgHxrl/bovcLkPcF/vXF/vXFwvJF/vXl939Yv8aaf/6Yh36Yh36l7QO7WMSiomZziQyLG1iHUzamtfq",
- "drRJwaxZfDsHgOlaJuuXPGb6kJALTHCj5paAa5A0x2chVJCxWmC4IWYSQHZyyZMWJDaoz0x8v/mvjaZ0",
- "r+4fP+j2UZrlecib+31R3sVPthbMN+RycjnpjSShENeQ2bz5MOHH9to57P+px/2xlzuIKdf41rNPOCCq",
- "ms9ZyizKc8EXhC5EEwls+DbhAr+ANMDZCgyE6akr6cEUWZnFu2qk7byktuTelwDOmi3c6UnvkEvciW4I",
- "b08P+n+McZ//S0vpt0hhuBUj3Tp2j6t+4Sqfgqt8dr7ye/dNBubD/5Vi5pPjJ7/bBYXG5tdCk+8wyv12",
- "4lhd4TlWiOKmgpYvF+7NfU2kbBh5irdoHXP67r25CPC5FXfBNoGUJ0dHmOq+FEofTcz11w6yDD++r2H2",
- "dfgnpWTXWNbv/cf/CQAA///C44qjANcAAA==",
+ "H4sIAAAAAAAC/+y9fXPcNpIw/lXwm7sq27qhJL8ku1ZV6n6ynWR1sR2XpWR3z/KTxZA9M1iRAAOAo5n4",
+ "8Xd/Cg2ABElwhiMp9ubOf9ka4qXRaDT6Dd0fJqkoSsGBazU5+TApqaQFaJD4F01TUXGdsMz8lYFKJSs1",
+ "E3xy4r8RpSXji8l0wsyvJdXLyXTCaQFNG9N/OpHwa8UkZJMTLSuYTlS6hIKagfWmNK3rkdbJQiRuiFM7",
+ "xNmLycctH2iWSVCqD+WPPN8QxtO8yoBoSbmiqfmkyDXTS6KXTBHXmTBOBAci5kQvW43JnEGeqUO/yF8r",
+ "kJtglW7y4SV9bEBMpMihD+dzUcwYBw8V1EDVG0K0IBnMsdGSamJmMLD6hloQBVSmSzIXcgeoFogQXuBV",
+ "MTl5N1HAM5C4WymwFf53LgF+g0RTuQA9eT+NLW6uQSaaFZGlnTnsS1BVrhXBtrjGBVsBJ6bXIXlVKU1m",
+ "QCgnb797Th4/fvzULKSgWkPmiGxwVc3s4Zps98nJJKMa/Oc+rdF8ISTlWVK3f/vdc5z/3C1wbCuqFMQP",
+ "y6n5Qs5eDC3Ad4yQEOMaFrgPLeo3PSKHovl5BnMhYeSe2MZ3uinh/J91V1Kq02UpGNeRfSH4ldjPUR4W",
+ "dN/Gw2oAWu1LgylpBn13nDx9/+Hh9OHxx397d5r8t/vzq8cfRy7/eT3uDgxEG6aVlMDTTbKQQPG0LCnv",
+ "4+Otowe1FFWekSVd4ebTAlm960tMX8s6VzSvDJ2wVIrTfCEUoY6MMpjTKtfET0wqnhs2ZUZz1E6YIqUU",
+ "K5ZBNjXc93rJ0iVJqbJDYDtyzfLc0GClIBuitfjqthymjyFKDFw3wgcu6F8XGc26dmAC1sgNkjQXChIt",
+ "dlxP/sahPCPhhdLcVWq/y4pcLIHg5OaDvWwRd9zQdJ5viMZ9zQhVhBJ/NU0Jm5ONqMg1bk7OrrC/W43B",
+ "WkEM0nBzWveoObxD6OshI4K8mRA5UI7I8+eujzI+Z4tKgiLXS9BLd+dJUKXgCoiY/RNSbbb9v85/fE2E",
+ "JK9AKbqANzS9IsBTkUF2SM7mhAsdkIajJcSh6Tm0DgdX7JL/pxKGJgq1KGl6Fb/Rc1awyKpe0TUrqoLw",
+ "qpiBNFvqrxAtiARdST4EkB1xBykWdN2f9EJWPMX9b6ZtyXKG2pgqc7pBhBV0/c3x1IGjCM1zUgLPGF8Q",
+ "veaDcpyZezd4iRQVz0aIOdrsaXCxqhJSNmeQkXqULZC4aXbBw/h+8DTCVwCOH2QQnHqWHeBwWEdoxpxu",
+ "84WUdAEByRySnxxzw69aXAGvCZ3MNviplLBiolJ1pwEYcertEjgXGpJSwpxFaOzcocMwGNvGceDCyUCp",
+ "4JoyDplhzgi00GCZ1SBMwYTb9Z3+LT6jCr5+MnTHN19H7v5cdHd9646P2m1slNgjGbk6zVd3YOOSVav/",
+ "CP0wnFuxRWJ/7m0kW1yY22bOcryJ/mn2z6OhUsgEWojwd5NiC051JeHkkh+Yv0hCzjXlGZWZ+aWwP72q",
+ "cs3O2cL8lNufXooFS8/ZYgCZNaxRhQu7FfYfM16cHet1VK94KcRVVYYLSluK62xDzl4MbbIdc1/CPK21",
+ "3VDxuFh7ZWTfHnpdb+QAkIO4K6lpeAUbCQZams7xn/Uc6YnO5W/mn7LMTW9dzmOoNXTsrmQ0HzizwmlZ",
+ "5iylBolv3Wfz1TABsIoEbVoc4YV68iEAsZSiBKmZHZSWZZKLlOaJ0lTjSP8uYT45mfzbUWN/ObLd1VEw",
+ "+UvT6xw7GZHVikEJLcs9xnhjRB+1hVkYBo2fkE1YtodCE+N2Ew0pMcOCc1hRrg8blaXFD+oD/M7N1ODb",
+ "SjsW3x0VbBDhxDacgbISsG14T5EA9QTRShCtKJAucjGrf7h/WpYNBvH7aVlafKD0CAwFM1gzpdUDXD5t",
+ "TlI4z9mLQ/J9ODaK4oLnG3M5WFHD3A1zd2u5W6y2Lbk1NCPeUwS3U8hDszUeDUbMvwuKQ7ViKXIj9eyk",
+ "FdP4L65tSGbm91Gd/xgkFuJ2mLhQ0XKYszoO/hIoN/c7lNMnHGfuOSSn3b43IxszSpxgbkQrW/fTjrsF",
+ "jzUKryUtLYDui71LGUclzTaysN6Sm45kdFGYgzMc0BpCdeOztvM8RCFBUujA8CwX6dVfqFrewZmf+bH6",
+ "xw+nIUugGUiypGp5OIlJGeHxakYbc8RMQ1TwySyY6rBe4l0tb8fSMqppsDQHb1wssajHfsj0QEZ0lx/x",
+ "PzQn5rM524b122EPyQUyMGWPs3MyZEbbtwqCnck0QCuEIIVV8InRuveC8nkzeXyfRu3Rt9am4HbILQJ3",
+ "SKzv/Bg8E+sYDM/EuncExBrUXdCHGQfFSA2FGgHfCweZwP136KNS0k0fyTj2GCSbBRrRVeFp4OGNb2Zp",
+ "jLOnMyFvxn06bIWTxuRMqBk1YL7TDpKwaVUmjhQjZivboDNQ4+XbzjS6w8cw1sLCuaa/AxaUGfUusNAe",
+ "6K6xIIqS5XAHpL+MMv0ZVfD4ETn/y+lXDx/98uirrw1JllIsJC3IbKNBkftONyNKb3J40F8ZakdVruOj",
+ "f/3EGyrb48bGUaKSKRS07A9lDaBWBLLNiGnXx1obzbjqGsAxh/MCDCe3aCfWtm9Ae8GUkbCK2Z1sxhDC",
+ "smaWjDhIMthJTPsur5lmEy5RbmR1F6osSClkxL6GR0yLVOTJCqRiIuJNeeNaENfCi7dl93cLLbmmipi5",
+ "0fRbcRQoIpSl13w837dDX6x5g5utnN+uN7I6N++YfWkj31sSFSlBJnrNSQazatHShOZSFISSDDviHf09",
+ "aBQFLlgB55oW5Y/z+d2oigIHiqhsrABlZiK2hZHrFaSC20iIHdqZG3UMerqI8SY6PQyAw8j5hqdoZ7yL",
+ "YzusuBaMo9NDbXgaaLEGxhyyRYssb6+tDqHDTnVPRcAx6HiJn9HQ8QJyTb8T8qKxBH4vRVXeuZDXnXPs",
+ "cqhbjDOlZKav16EZX+Tt6JuFgf0wtsbPsqDn/vi6NSD0SJEv2WKpA7XijRRifvcwxmaJAYofrFKWmz59",
+ "1ey1yAwz0ZW6AxGsGazhcIZuQ75GZ6LShBIuMsDNr1RcOBuI10BHMfq3dSjv6aXVs2ZgqCullVltVRL0",
+ "3vbui6ZjQlN7QhNEjRrwXdVOR9vKTmdjAXIJNNuQGQAnYuYcRM51hYuk6HrWXrxxomGEX7TgKqVIQSnI",
+ "EmeY2gmab2evDr0FTwg4AlzPQpQgcypvDezVaiecV7BJMFBCkfs//KwefAZ4tdA034FYbBNDb63mOy9g",
+ "H+px028juO7kIdlRCcTfK0QLlGZz0DCEwr1wMrh/XYh6u3h7tKxAoj/ud6V4P8ntCKgG9Xem99tCW5UD",
+ "4X9OvTUSntkwTrnwglVssJwqnexiy6ZRSwc3Kwg4YYwT48ADgtdLqrT1ITOeoenLXic4jxXCzBTDAA+q",
+ "IWbkn70G0h87NfcgV5Wq1RFVlaWQGrLYGjist8z1Gtb1XGIejF3rPFqQSsGukYewFIzvkGVXYhFEde1q",
+ "cUEW/cWhQ8Lc85soKltANIjYBsi5bxVgNwyBGgCEqQbRlnCY6lBOHXc1nSgtytJwC51UvO43hKZz2/pU",
+ "/9S07RMX1c29nQlQGHnl2jvIry1mbfDbkiri4CAFvTKyB5pBrLO7D7M5jIliPIVkG+WjimdahUdg5yGt",
+ "yoWkGSQZ5HTTH/Qn+5nYz9sGwB1v1F2hIbFRTPFNbyjZB41sGVrgeComPBL8QlJzBI0q0BCI671j5Axw",
+ "7BhzcnR0rx4K54pukR8Pl223OjIi3oYroc2OO3pAkB1HHwPwAB7qoW+OCuycNLpnd4q/g3IT1HLE/pNs",
+ "QA0toRl/rwUM2FBdgHhwXjrsvcOBo2xzkI3t4CNDR3bAoPuGSs1SVqKu8wNs7lz1604QdTOSDDRlOWQk",
+ "+GDVwDLsT2z8TXfMm6mCo2xvffB7xrfIcnKmUORpA38FG9S539jAzsDUcRe6bGRUcz9RThBQHy5mRPCw",
+ "CaxpqvONEdT0EjbkGiQQVc0KprUN2G6rulqUSThA1K+xZUbnxLNBkX4HxngVz3GoYHn9rZhOrE6wHb6L",
+ "jmLQQofTBUoh8hEWsh4yohCMivcgpTC7zlzsuI8e9pTUAtIxbfTg1tf/PdVCM66A/F1UJKUcVa5KQy3T",
+ "CImCAgqQZgYjgtVzusiOBkOQQwFWk8QvBwfdhR8cuD1niszh2j+4MA276Dg4QDvOG6F063DdgT3UHLez",
+ "yPWBDh9z8TktpMtTdkcWuJHH7OSbzuC1l8icKaUc4Zrl35oBdE7meszaQxoZF1WB447y5QRDx9aN+37O",
+ "iiqn+i68VrCieSJWICXLYCcndxMzwb9d0fzHutsOna6JAmNFARmjGvINKSWkYKPzjaim6rEPiY3bS5eU",
+ "L1BCl6JauMAxOw5y2EpZW4iseG+IqBSj1zxBq3KM47pgYf9Aw8gvQI0O1TVJW43hmtbzuTc5Y65Cv3MR",
+ "E33UKzWdDKqYBqmrRsW0yGm/MhnBfVsCVoCfZuKRvgtEnRE2+vgKt8VQr9nc38dG3gwdg7I/cRDK1nwc",
+ "imYz+m2+uQMpww5EJJQSFN4JoV1I2a9iHr4oc5eG2igNRd90brv+MnD83g4qaILnjENSCA6b6CNqxuEV",
+ "foweJ7yXBjqjhDDUtyv0t+DvgNWeZww13ha/uNvdE9p1EanvhLwrH6QdcLQ8PcLlt9O/7aa8qWOS5nnE",
+ "l+fem3QZgJrW79uZJFQpkTIUks4yNbUHzbn/3OOUNvrf1FG0d3D2uuN2nFbhU0Y0ykJeEkrSnKHJVnCl",
+ "ZZXqS07RKBQsNRJt5LXfYTPhc98kbpeMmA3dUJecYqRZbSqKRkjMIWIX+Q7AWwtVtViA0h3lYg5wyV0r",
+ "xknFmca5CnNcEnteSpAY8nNoWxZ0Q+aGJrQgv4EUZFbptriNz6mUZnnuPGhmGiLml5xqkgNVmrxi/GKN",
+ "w3kvuz+yHPS1kFc1FuK3+wI4KKaSeFTU9/YrBqy65S9d8Co+f7efrc/FjN+8udqgzah50v1/7v/nybvT",
+ "5L9p8ttx8vQ/jt5/ePLxwUHvx0cfv/nm/7Z/evzxmwf/+e+xnfKwxx77OMjPXjhV9OwF6huN06UH+ycz",
+ "uBeMJ1EiC8MnOrRF7uPDVkdAD9rWKL2ES67X3BDSiuYsM7zlJuTQvWF6Z9Gejg7VtDaiY33ya91Tir8F",
+ "lyERJtNhjTeWovqBhPFndegFdC/l8LzMK2630kvf9tWID+gS82n9dNJmVTkh+K5uSX00ovvz0VdfT6bN",
+ "e7j6+2Q6cV/fRyiZZevYq8cM1jHlzB0QPBj3FCnpRoGOcw+EPRq7ZoMpwmELMFq9WrLy03MKpdkszuF8",
+ "LL4z8qz5GbdB8ub8oE9x41wVYv7p4dYSIINSL2PZFlqCGrZqdhOgE+dRSrECPiXsEA67RpbM6Isuii4H",
+ "OsdX/6h9ijHaUH0OLKF5qgiwHi5klCUjRj8o8jhu/XE6cZe/unN1yA0cg6s7Z+1A9H9rQe59/+0FOXIM",
+ "U92zD3Dt0MGTyYgq7V4FtSKADDezOWaskHfJL/kLmDPOzPeTS55RTY9mVLFUHVUK5DOaU57C4UKQE//Q",
+ "6AXV9JL3JK3BNFDBEy9SVrOcpeQqVEga8rSpPfojXF6+o/lCXF6+7wVD9NUHN1WUv9gJEiMIi0onLjFB",
+ "IuGaypizSdUP03Fkm3lk26xWyBaVtUj6xAdu/DjPo2Wpug9U+8svy9wsPyBD5Z5fmi0jSgvpZREjoFho",
+ "cH9fC3cxSHrt7SqVAkX+UdDyHeP6PUkuq+Pjx0BaLzb/4a58Q5ObEkZbVwYf0HaNKrhwq1bCWkualHQR",
+ "82ldXr7TQEvcfZSXC7Rx5DnBbq2Xoj4SHodqFuDxMbwBFo69X73h4s5tL5+EKr4E/IRbiG2MuNF42m+6",
+ "X8Hb0RtvV+f9aW+XKr1MzNmOrkoZEvc7U+emWRghy4c/KLZAbdWl8ZkBSZeQXrn8KlCUejNtdfcRNk7Q",
+ "9KyDKZt5x778wtwP6BGYAanKjDpRnPJN9xG+Aq19HO9buILNhWhSR+zz6r79CFwNHVSk1EC6NMQaHls3",
+ "RnfzXRgXKvZl6d9S46M6TxYnNV34PsMH2Yq8d3CIY0TReqQ8hAgqI4iwxD+Aghss1Ix3K9KPLc9oGTN7",
+ "80Wy8HjeT1yTRnlyEVfhatDqbr8XgGm8xLUiM2rkduEyUNmHzgEXqxRdwICEHDplRj4nbjlycJBd9170",
+ "phPz7oXWu2+iINvGiVlzlFLAfDGkgspMJ87Oz2T9fs4zgYklHcJmOYpJdUCiZTpUtpxjNlPeEGhxAgbJ",
+ "G4HDg9HGSCjZLKnyybEwh5g/y6NkgN/x4f62dC1nQYhYkCisTsbieW73nPa0S5e0xWdq8elZQtVyRKoV",
+ "I+FjVHpsOwRHASiDHBZ24baxJ5QmiUCzQQaOH+fznHEgSSzaLDCDBteMmwOMfHxAiLXAk9EjxMg4ABv9",
+ "2TgweS3Cs8kX+wDJXRIE6sdGT3jwN8Tfa9n4ayPyiNKwcDbg1Uo9B6AuRLG+vzqBsjgMYXxKDJtb0dyw",
+ "OafxNYP0soag2NrJEeIiKh4MibNbHCD2YtlrTfYquslqQpnJAx0X6LZAPBPrxD7YjEq8s/XM0Hs0JB2f",
+ "j8YOps3Pck+RmVhjlA5eLTYEegcsw3B4MAINf80U0iv2G7rNLTDbpt0uTcWoUCHJOHNeTS5D4sSYqQck",
+ "mCFyuR+kXLkRAB1jR5O/2Cm/O5XUtnjSv8ybW23apBLzr31ix3/oCEV3aQB/fStMnSTlTVdiidop2sEm",
+ "7fwwgQgZI3rDJvpOmr4rSEEOqBQkLSEquYp5To1uA3jjnPtugfECs9BQvnkQRDBJWDCloTGi+ziJz2Ge",
+ "pJj8Toj58Op0KedmfW+FqK8p60bEjq1lfvIVYAjwnEmlE/RARJdgGn2nUKn+zjSNy0rtGCmbKpZlcd6A",
+ "017BJslYXsXp1c37wwsz7euaJapqhvyWcRuwMsPUxtHIyS1T2+DarQt+aRf8kt7ZesedBtPUTCwNubTn",
+ "+IOciw7n3cYOIgQYI47+rg2idAuDDF689rljIDcFPv7DbdbX3mHK/Ng7o3b8u9uhO8qOFF1LYDDYugqG",
+ "biIjljAdZAbuP0UdOAO0LFm27thC7aiDGjPdy+Dh86l1sIC76wbbgYHA7hl7DSNBtVPnNQK+zfHcylxz",
+ "OAozF+0EdyFDCKdiylco6COqfi23C1cXQPMfYPOzaYvLmXycTm5nOo3h2o24A9dv6u2N4hld89aU1vKE",
+ "7IlyWpZSrGieOAPzEGlKsXKkic29PfoTs7q4GfPi29OXbxz4H6eTNAcqk1pUGFwVtiv/MKuyWfoGDojP",
+ "gG50Pi+zW1Ey2Pw6tVholL5egkslHUijvZyXjcMhOIrOSD2PRwjtNDk734hd4hYfCZS1i6Qx31kPSdsr",
+ "QleU5d5u5qEdiObBxY1LnBrlCuEAt/auBE6y5E7ZTe90x09HQ107eFI415Zk14XN566I4F0XOsY8b0rn",
+ "dS8oZqy0VpE+c+JVgZaEROUsjdtY+UwZ4uDWd2YaE2w8IIyaESs24IrlFQvGMs3G5KTpABnMEUWmiqbF",
+ "aXA3E65WT8XZrxUQlgHX5pPEU9k5qJjexFnb+9epkR36c7mBrYW+Gf42MkaYrbV74yEQ2wWM0FPXA/dF",
+ "rTL7hdYWKfND4JLYw+Efzti7Erc46x19OGq2wYvLtsctLK3T53+GMGyO9d11fbzy6tLGDswRrdPDVDKX",
+ "4jeI63moHkceGvn8tAyjXH6D8KFDWJ2ixWJq605TbqiZfXC7h6Sb0ArVDlIYoHrc+cAth4kyvYWacrvV",
+ "tmxGK9YtTjBhVOmRHb8hGAdzLxI3p9czGssiaoQMA9Np4wBu2dK1IL6zx72qX1vY2UngS67bMvuIvATZ",
+ "vAHsJ6S5ocBgpx0tKjSSAVJtKBNMrf8vVyIyTMWvKbfVV0w/e5RcbwXW+GV6XQuJKSBU3OyfQcoKmscl",
+ "hyztm3gztmC2sEilIKhc4QayRZssFbnqH/UbIoeaszk5ngblc9xuZGzFFJvlgC0e2hYzqpCT14aouotZ",
+ "HnC9VNj80Yjmy4pnEjK9VBaxSpBaqEP1pnZezUBfA3ByjO0ePiX30W2n2AoeGCy6+3ly8vApGl3tH8ex",
+ "C8AVhtnGTTJkJ3917CROx+i3tGMYxu1GPYy+lreV4YYZ15bTZLuOOUvY0vG63WepoJwuIB4pUuyAyfbF",
+ "3URDWgcvPLNljZSWYkOYjs8Pmhr+NBB9btifBYOkoiiYLpxzR4nC0FNTlsJO6oezNZJcRmEPl/+IPtLS",
+ "u4g6SuSnNZra+y22avRkv6YFtNE6JdTm/chZE73g85yTM59WCFMs15mVLW7MXGbpKOZgMMOclJJxjYpF",
+ "pefJn0m6pJKmhv0dDoGbzL5+Ekkr3U5vyvcD/JPjXYICuYqjXg6QvZchXF9ynwueFIajZA+a1x7BqRx0",
+ "5sbddkO+w+1DjxXKzCjJILlVLXKjAae+FeHxLQPekhTr9exFj3uv7JNTZiXj5EErs0M/vX3ppIxCyFiu",
+ "wOa4O4lDgpYMVhi7F98kM+Yt90Lmo3bhNtB/Xs+DFzkDscyf5Zgi8ExEtFOf6ry2pLtY9Yh1YOiYmg+G",
+ "DGZuqClpp5X+9Hz0bqKg4p4ub9juO7bMF48H/KOLiM9MLriBjS/frmSAUIK0+lGSyervgY+dkmdiPZZw",
+ "OqfQE8+/AIqiKKlYnv3cvPzsVC2QlKfLqM9sZjr+0tRXqxdn78Bo2r8l5Rzy6HBW3vzFy6URyfmfYuw8",
+ "BeMj23YLKdjldhbXAN4G0wPlJzToZTo3E4RYbT+qq4O284XICM7T5Jhrjmu/AEeQJv3XCpSOPVDCDzZw",
+ "DG2jhh3YLN0EeIYa6SH53pZQXgJpJRBCTdBnimi/mq7KXNBsihksLr49fUnsrLaPrRJks4QvUBFqr6Jj",
+ "EwvSZ44LQfYFf+LPI8aPsz1e26xa6aRO6h17gGpaNGnHWcdPgCpSiJ1D8iIohmrfqpohDD3MmSyMVleP",
+ "ZuUjpAnzH61pukS1r8Vah0l+fHp7T5UqKClZl4aqc0riuTNwuwz3NsH9lAijm18zZSvnwgrab17rB+DO",
+ "7ODfwLaXJyvOLaUc7nHL1Rkk90W7B85ekd6VEIWsg/g9hX5bHWLfbP/n2Cua4qpbOqBXS9K+oKxL/viK",
+ "6CnlgrMUE0zFrmhXYneMn21ELq6uIdcfcXdCI4crWrCgDsVzWBwsYeAZoUNc39AffDWbaqnD/qmxluuS",
+ "arIArRxng2zq6244WyPjClyOUCzIHPBJIVu+S+SQUXd4UrtN9iQjfHozoDx+Z769dqYFjEm/YhyVCIc2",
+ "J/hZayBWANVG82CaLAQot572+2P1zvQ5xKe4GazfH/qKoTiGdf2ZZVs/d3+oU+/1dl5m0/a5aesSJNU/",
+ "t6Kc7aSnZekmHa7KEpUH9JoPIjjivUy8+yhAbj1+ONoWctsaroL3qSE0WKGzG0q8h3uEUVco6VS/MkKr",
+ "pShsQWyYWDRLAuMRMF4yDk0928gFkUavBNwYPK8D/VQqqbYi4CiedgE0Rw93jKEp7dwbtx2qmx7KoATX",
+ "6OcY3samuMoA46gbNIIb5Zu6jK6h7kCYeI71ux0i+6VSUKpyQlSGrxY6xVNijMMwbl+eqX0B9I9BXyay",
+ "3bWk9uTscxMNPUSdVdkCdEKzLJay9Rl+JfiVZBVKDrCGtKpTe5YlSTHvSjsRTZ/a3ESp4KoqtszlG9xy",
+ "uqAaUYQawopIfofxoctsg//G8loO74wL9Ng71NBHdWT7ZV/qh07GpF5D04lii2Q8JvBOuT06mqlvRuhN",
+ "/zul9Fws2oB84vQT27hcuEcx/vatuTjC7Ay9ZK32aqmTJ2Bgn/A1JFFtrJ/9trkSXmW97K3oUKpr1G03",
+ "QAxXm5vi5TcQ3hsk3aD2frUeyqEg33QwJp1q9zpOU7KVBQ2+OLIRQvZtEUIRt84ORQXZoCDzudd7nGTY",
+ "k7N1PPFhgFAfbtYH6Acfy0pKypz7vWEWfcy6qPf+O4Qx8bDNBncX4WLJBy12P6yG4r59Mjb83q1GdQXu",
+ "yXwpYcVE5R3bPvLJq4T211ZtpzryPrr+vuEVp/q85tBB4+2Fqwpgl+l08h9+tnFyBLiWm38BU25v03t1",
+ "rvrSrjVPNU1InVB6VILp1q04JlFhLCeekw1blbZ21AnrkdWLMeJAv+7XdHKW7XVhxvIqTuwosWMXr+I1",
+ "nHaqSTWFR6wUijV53WPlvUaGGF5gha4gbVZ/LB/fs4JUYzL/Jm5BAuyTRMtMFhQM/ZJ+akCdriMxXdap",
+ "bamm+hn8d9zxvddgwYtGm/38cHxipdM6Og35NGZDXgB3NTvb7zxGR5vP55Bqttrx+u6vS+DBy66pt8vY",
+ "2tvBYzxWRy9j8pb9rY4NQNsex22FJ0iieGtwht7eXMHmniItaoimY5/6q/YmeTsQA8gdEkMiQsWiP6wh",
+ "2TnkmaopA7Hgo61sd2gyoA1Wcgrekt5wLk+S5uJo3pdumTJeSmbUXKbrXq+uMRB36IFevxLFsP7xAgt/",
+ "qLrKos/7EWrp5KyfHfHa5Q3Bt5K178RnEAHlf/MPo+0sObuCsNYUeqquqcx8i6jpxVt1ki33Ue9Vna+i",
+ "0AV6Xs/MmtjY/juqSL4tjIBOc2HEiGQojLwdjlrHctxTNujGpn/HQFsD1xykq8mH8m8uFCRa+FjabXBs",
+ "Q4WNLLoREtRgjksL3GDmmbdNah3M9Usx0wx1AUXhAomEghroZJAAZ3jObch+br/7h0M+1+tOC1NNr7uL",
+ "DvioaKZ6SAypfk7cbbn7QdJNjE2Mc1v3WcWy4XCQbW9IKUVWpfaCDg9GbZAbnWtqCyuJ2mnS/io7OkLw",
+ "qvMKNkdWCfLVGvwOhkBbycmCHmRR6GzynZrfVAzuxZ2A9zktV9NJKUSeDDg7zvopfLoUf8XSK8iIuSl8",
+ "9OBA5RtyH23stTf7ernxKWvKEjhkDw4JOeU2Xts7tts5pDuT83t62/xrnDWrbFYtZ1Q7vOTxwFfMdyVv",
+ "yc38MNt5mALD6m45lR1kR4KY9UD6IEmvI3WgDsdq5X1Xc7c2T0NUFoqYTNKUndkRJ1OHyDSVP5owmb50",
+ "kOfiOkEqSur8XzGdw7RrM0mf8bTpZrA9gyDehip3gW7IkmYkFVJCGvaIP3GwQBVCQpILDL+JeQbn2shD",
+ "BcY1c5KLBRGlUXNtGj3vQ4mWpQnmss9sbc/EOmoGEhmAcs9q3TS2cX+eLdVr9q+Mc7GM2FsQ0R7Le5e/",
+ "cYSyd9WKAMwRBLrb1nQaq+7TXle3PtRQtTYtCpbG0f3HijIZjA3ZUbsosr6aHF1pJf8qcABXUZftdg+p",
+ "rUM3G+snrXMmjzwWAQDDntMWDKP8p/uCMce6jgmNIPmsllqnrbK7rHP2fT47S+MptVrrEogZu5LgXqnZ",
+ "AnSdyjkl1Ut/i5nmfd3S6Cmg8AmZLf9BlbWEeIuMq37XFQ9EmeSwgpZD2T2dq9IUlGIrCCvn2c4kAyjR",
+ "PtmVmmOe0pDLdUQpt/Yk8LWNwW5UtrKItTtFdghOUTFvzRN7TNTYo2QgWrGsoi38qVvUIhsqQxZhwx7W",
+ "kZxibyYRX9w2FrEztgFpPnoueTy0IXy5WRtFcLasNp5aImxOtirpNR9WIiJ2p9rffvt1EByMqM5L6sEr",
+ "X9a7clMFcpAythFGr35gVOZQ4Ou/hklPvLjl+kZkLGvqYioyAFPNecboPWiiw4JmBd2QjM3nIK0xX2nK",
+ "MyqzsDnjJAWpKTOazUbdXKw10MoKpjslW8NdcVDPYGIyLtqlLCD5xqkMt5A60XMTkTjtVavFUInE3q7E",
+ "nxPQtZGuMa5qgAjcQ2iUre0BExwFJFLQK9hzHsV+g+3TYHoSZ/vTAmcdM0XM13rD3GqjWHc/DCFyuwXF",
+ "ELd7hsLUi82bLmmjWdCS7C/ILo2/ai7OcWUZfYcd4IUOw6Awo7fdOHA+8+OoVzVSgqW8H6KE1vJ3+SDd",
+ "AhtJI9gixwi0BpsI1wbUt/clcDCr57XfdqiGaNe9i3kWBbdF/npuYcubbNW+gHDMWZArmn961y4m4DxF",
+ "fED2dtgYHPoGQyRbVKqbvUx4SUfNHfgB725q/gZd0X8Fs0dRrdQN5USYWqz3wTx4s9DcGi7mvoTXCji5",
+ "xjFtHNvDr8nMvdwuJaRMdUWja19do3aFYbEp9xpkrXf43nat82ehb0HGc69pkNdNpn7U8Re8gbA5op+Z",
+ "qQyc3CiVx6ivRxYR/MV4VJhCbcd1cdUKcLOVTzovN4SEOw50C0LW9wx06yeHG7s8G8xlLp1KQX+do2/r",
+ "Fm4jF3WztrFRmn3kbkvnPia4Ml6lwXTH6E6LECxxQhBU8o+H/yAS5ljDUJCDA5zg4GDqmv7jUfuzOc4H",
+ "B1Hp7JPFdVocuTHcvDGK+XnopZ99zTbwqLSzHxXLs12E0Xoi3FQBxUewv7hEBJ+lDukvNtakf1RdLbhb",
+ "BMhZxETW2po8mCp4/Dvi3a/rFnnli36ctJJMbzA/orcfsF+iEajf19FMLhqu1g/d3afFFdQZNpvYp0r5",
+ "2/V7QXO8j6zays0tJPJD8u2aFmUO7qB8c2/2J3j85yfZ8eOHf5r9+fir4xSefPX0+Jg+fUIfPn38EB79",
+ "+asnx/Bw/vXT2aPs0ZNHsyePnnz91dP08ZOHsydfP/3TPcOHDMgW0InPxjP5GxbrTU7fnCUXBtgGJ7Rk",
+ "P8DG1gU0ZOwrDtIUTyIUlOWTE//T/+9P2GEqimZ4/+vEJfuYLLUu1cnR0fX19WHY5WiBwQ6JFlW6PPLz",
+ "9EoSnr45q71E1gqEO2rfyXrrnieFU/z29tvzC3L65uwwqFd/Mjk+PD58iOXNS+C0ZJOTyWP8CU/PEvf9",
+ "yBHb5OTDx+nkaAk0x9hA80cBWrLUf5JAs437v7qmiwXIQ1eG0fy0enTkxYqjDy7o4+O2b0dhRZOjD63Y",
+ "mGxHT6x4cPTBJ/Lb3rqVKc/FBJmlRxX970G7MFClw3JLLaPEbOPDWqZECel85aVkwpwqLOWdQSqB4hkQ",
+ "Eh82a1nx1Jop7BTA8b+vTv+GpppXp38j35DjqXvvrlDtiE1vPcE1OZxlFuy+dUo925zWUVZBmu+TdzHH",
+ "Taw8JB4nQysBtdcjNtwM7TZhUd2aNxt+e5w8ff/hqz9/jMl8/bLkHklB4FGIei18sjtEWkHX3wyhbO3s",
+ "4GbcXyuQm2YRBV1PQoD7doxINPacLdD+5NNRtsqEuiqBTJH/Ov/xNRGSOB33DU2vaueVARlzuEmxYvgq",
+ "OAuekpueQxC76y8E2td1cl6wQi3K9sPEGs3vMUEWAoqH/tHx8S1Kx0cIzVXktl4TV76q7aJXBNY01fmG",
+ "UBVY6FQ1a5LZdVyMokxa5vyou3F4Rl8aJuYL2TdKIPJyHkuwbIfvopP4q4UO5/nCUlS7oyV7yIhC8D52",
+ "2Ydb62nky+7+z9jdvuxASmHONEOfeXPl+OusBWRTIMSBOxAAdUj+LiqU8GwJQIhl5MUZ0M/j53TxmsGz",
+ "gMaBiV8ODroLPzhwe84UmcM1MlnKsWEXHQcHWDP6yZ6sbKs1ufW8cdTZ2We43ma9ous6ESolXPCEY4W6",
+ "FZBALXxy/PAPu8Izjo8FjGhKrOj9cTr56g+8ZWfcCDY0J9jSrubxH3Y15yBXLAVyAUUpJJUs35CfeJ39",
+ "Jsiq22d/P/ErLq65R4TRKquioHLjhGha85yKB/mItvKfXuRlI2gjF6ULhUF6KKJOWpVY+WLy/qPXAUYq",
+ "FtuaHc0w5d/YpqCCxsPaCfoP1NEHtIAP/n7kcpTFP6Inwqq4R/5NSLxlS/H5oNcG1k6PlOp0WZVHH/A/",
+ "qHIGYNmMAEd6zY/QnXn0obUa97m3mvbvTfewxaoQGXiAxXxuaw1s+3z0wf4bTATrEiQzdwq+wnG/2teS",
+ "R5gBdNP/ecPT6I/9dXQL/cZ+PvrQLjTVQpBaVjoT10Ff9ABY91V/vrr0auvvo2vKtJFf3LMjTATe76yB",
+ "5kcux1Dn1+ZZf+8L5ioIfuxIPKWw4a1tZfMtvb5ohaJIG2f4TKCBYIgXrpMZ48ggQgbW2PXsx7720mNb",
+ "F0uwDmrvGo2Ih1qQmRQ0S6nC/NIuG1dPbf14S9WoGxZ5FnF8IZhoCei/YDFH/XCnNwTHHSP/BfsSlGVA",
+ "OVxZe+DvLDP1IHpGM+LjoRPyiuZmwyEjp04yb2Hj95Z3Pr+A8pklik8mAjzzh08RilH7Ld1NxuONg7R5",
+ "Y+57o+AZBrAAnjgWlMxEtvFVRiS91msb499lbkd1uZjoxzswEv5rWwZ3GQS/2OG+2OG+WGq+2OG+7O4X",
+ "O9xIO9wXK9UXK9X/SivVPqapmJjpTDPD0iamU6etea1uR5u0FTWLbz8hYrqWyfrVOZg+JOQCkwJQc0vA",
+ "CiTNsYKZCrJ8FBgCiQ+RIDu55EkLEhtoaCa+3/zXRnheVsfHj4EcP+j2UZrlecib+31R3sVPNqXgN+Ry",
+ "cjnpjSShECvI7POE8Nm07bVz2P+vHvfHXr4FfP6wpCuo3ysRVc3nLGUW5bngC0IXoolONnybcIFfQBrg",
+ "bNYqwvTUZYZjilybxbuk9u3X3W3JvS8BnDVbuNOj3yGXuDPfEN6envz/GOPG/18tpd/iCdGtGOnWsXtc",
+ "9QtX+RRc5bPzlT+6jzQwH/6PFDOfHD/5wy4oNDa/Fpp8h5H3txPH6kIhseRdNxW0/AtBb+5ronfDaFi8",
+ "Res42HfvzUWAlQHdBdsEd54cHWF6oKVQ+mhirr924Gf48X0Nsy/nNCklW2F26Pcf/18AAAD//wqEN9SA",
+ "5AAA",
}
// GetSwagger returns the content of the embedded swagger specification file
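
The swaggerSpec rewrites above look opaque because the generated routes files store the OpenAPI document gzipped, base64-encoded, and split into string chunks; any change to the spec therefore rewrites the whole blob. A minimal, standard-library-only sketch of that round trip (the chunking and names here are illustrative, not the generated decoder itself):

    package main

    import (
        "bytes"
        "compress/gzip"
        "encoding/base64"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        // Forward direction: gzip a tiny document, base64 it, store as chunks.
        var gz bytes.Buffer
        zw := gzip.NewWriter(&gz)
        io.WriteString(zw, `{"openapi":"3.0.0"}`)
        zw.Close()
        spec := []string{base64.StdEncoding.EncodeToString(gz.Bytes())}

        // Reverse direction, as a GetSwagger-style accessor would do it:
        // join chunks, base64-decode, gunzip.
        raw, err := base64.StdEncoding.DecodeString(strings.Join(spec, ""))
        if err != nil {
            panic(err)
        }
        zr, err := gzip.NewReader(bytes.NewReader(raw))
        if err != nil {
            panic(err)
        }
        out, _ := io.ReadAll(zr)
        fmt.Println(string(out)) // {"openapi":"3.0.0"}
    }
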
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index d9e53216d..05aa4ed91 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -45,6 +45,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/simulation"
"github.com/algorand/go-algorand/logging"
@@ -54,14 +55,16 @@ import (
"github.com/algorand/go-algorand/stateproof"
)
-// max compiled teal program is currently 8k
+// MaxTealSourceBytes sets a size limit for TEAL source programs for requests
+// Max TEAL program size is currently 8k
// but we allow for comments, spacing, and repeated consts
-// in the source teal, allow up to 200kb
-const maxTealSourceBytes = 200_000
+// in the source TEAL, so we allow up to 200KB
+const MaxTealSourceBytes = 200_000
+// MaxTealDryrunBytes sets a size limit for dryrun requests
// With the ability to hold unlimited assets DryrunRequests can
-// become quite large, allow up to 1mb
-const maxTealDryrunBytes = 1_000_000
+// become quite large, so we allow up to 1MB
+const MaxTealDryrunBytes = 1_000_000
// Handlers is an implementation to the V2 route handler interface defined by the generated code.
type Handlers struct {
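
Exporting these constants lets other packages reuse the same request-size caps. A minimal sketch of the pattern they feed, using http.MaxBytesReader as the diff itself does further down; the handler, route, and limit here are illustrative only:

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
        "net/http/httptest"
        "strings"
    )

    const maxBodyBytes = 200_000 // stands in for MaxTealSourceBytes

    func handler(w http.ResponseWriter, r *http.Request) {
        buf := new(bytes.Buffer)
        // Passing nil for the ResponseWriter matches the handler code; the
        // wrapped reader errors once more than maxBodyBytes are read.
        limited := http.MaxBytesReader(nil, r.Body, maxBodyBytes)
        if _, err := buf.ReadFrom(limited); err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        fmt.Fprintf(w, "read %d bytes\n", buf.Len())
    }

    func main() {
        req := httptest.NewRequest("POST", "/compile", strings.NewReader("int 1"))
        rec := httptest.NewRecorder()
        handler(rec, req)
        fmt.Print(rec.Body.String())
    }
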
@@ -89,6 +92,7 @@ type LedgerForAPI interface {
Block(rnd basics.Round) (blk bookkeeping.Block, err error)
AddressTxns(id basics.Address, r basics.Round) ([]transactions.SignedTxnWithAD, error)
GetStateDeltaForRound(rnd basics.Round) (ledgercore.StateDelta, error)
+ GetTracer() logic.EvalTracer
}
// NodeInterface represents node fns used by the handlers.
@@ -98,7 +102,7 @@ type NodeInterface interface {
GenesisID() string
GenesisHash() crypto.Digest
BroadcastSignedTxGroup(txgroup []transactions.SignedTxn) error
- Simulate(txgroup []transactions.SignedTxn) (result simulation.Result, err error)
+ Simulate(request simulation.Request) (result simulation.Result, err error)
GetPendingTransaction(txID transactions.Txid) (res node.TxnWithStatus, found bool)
GetPendingTxnsFromPool() ([]transactions.SignedTxn, error)
SuggestedFee() basics.MicroAlgos
@@ -113,6 +117,8 @@ type NodeInterface interface {
SetSyncRound(rnd uint64) error
GetSyncRound() uint64
UnsetSyncRound()
+ GetBlockTimeStampOffset() (*int64, error)
+ SetBlockTimeStampOffset(int64) error
}
func roundToPtrOrNil(value basics.Round) *uint64 {
@@ -793,26 +799,29 @@ func (v2 *Handlers) GetStatus(ctx echo.Context) error {
CatchpointAcquiredBlocks: &stat.CatchpointCatchupAcquiredBlocks,
}
- nextProtocolVoteBefore := uint64(stat.NextProtocolVoteBefore)
- var votesToGo int64 = int64(nextProtocolVoteBefore) - int64(stat.LastRound)
- if votesToGo < 0 {
- votesToGo = 0
- }
- if nextProtocolVoteBefore > 0 {
+ // Make sure a vote is happening
+ if stat.NextProtocolVoteBefore > 0 {
+ votesToGo := uint64(0)
+ // Check if the vote window is still open.
+ if stat.NextProtocolVoteBefore > stat.LastRound {
+ // subtract 1 because the variables are referring to "Last" round and "VoteBefore"
+ votesToGo = uint64(stat.NextProtocolVoteBefore - stat.LastRound - 1)
+ }
+
consensus := config.Consensus[protocol.ConsensusCurrentVersion]
upgradeVoteRounds := consensus.UpgradeVoteRounds
upgradeThreshold := consensus.UpgradeThreshold
- votes := uint64(consensus.UpgradeVoteRounds) - uint64(votesToGo)
+ votes := consensus.UpgradeVoteRounds - votesToGo
votesYes := stat.NextProtocolApprovals
votesNo := votes - votesYes
- upgradeDelay := uint64(stat.UpgradeDelay)
+ upgradeDelay := stat.UpgradeDelay
response.UpgradeVotesRequired = &upgradeThreshold
response.UpgradeNodeVote = &stat.UpgradeApprove
response.UpgradeDelay = &upgradeDelay
response.UpgradeVotes = &votes
response.UpgradeYesVotes = &votesYes
response.UpgradeNoVotes = &votesNo
- response.UpgradeNextProtocolVoteBefore = &nextProtocolVoteBefore
+ response.UpgradeNextProtocolVoteBefore = numOrNil(uint64(stat.NextProtocolVoteBefore))
response.UpgradeVoteRounds = &upgradeVoteRounds
}
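
The rewritten vote accounting stays in uint64 throughout instead of round-tripping through int64, and clamps votesToGo to zero once the vote window has closed. Tracing the new arithmetic with made-up numbers (none of these values come from the diff):

    package main

    import "fmt"

    func main() {
        lastRound := uint64(20_000)         // stat.LastRound
        voteBefore := uint64(25_000)        // stat.NextProtocolVoteBefore
        upgradeVoteRounds := uint64(10_000) // consensus.UpgradeVoteRounds

        votesToGo := uint64(0)
        if voteBefore > lastRound {
            // subtract 1 because LastRound has already happened
            // and voting happens strictly before VoteBefore
            votesToGo = voteBefore - lastRound - 1
        }
        votes := upgradeVoteRounds - votesToGo // rounds already voted
        fmt.Println(votesToGo, votes)          // 4999 5001
    }
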
@@ -918,25 +927,41 @@ func (v2 *Handlers) RawTransaction(ctx echo.Context) error {
return ctx.JSON(http.StatusOK, model.PostTransactionsResponse{TxId: txid.String()})
}
-// preEncodedSimulateTxnResult mirrors model.SimulateTransactionResult
-type preEncodedSimulateTxnResult struct {
- Txn PreEncodedTxInfo `codec:"txn-result"`
- MissingSignature *bool `codec:"missing-signature,omitempty"`
+// PreEncodedSimulateTxnResult mirrors model.SimulateTransactionResult
+type PreEncodedSimulateTxnResult struct {
+ Txn PreEncodedTxInfo `codec:"txn-result"`
+ AppBudgetConsumed *uint64 `codec:"app-budget-consumed,omitempty"`
+ LogicSigBudgetConsumed *uint64 `codec:"logic-sig-budget-consumed,omitempty"`
+}
+
+// PreEncodedSimulateTxnGroupResult mirrors model.SimulateTransactionGroupResult
+type PreEncodedSimulateTxnGroupResult struct {
+ Txns []PreEncodedSimulateTxnResult `codec:"txn-results"`
+ FailureMessage *string `codec:"failure-message,omitempty"`
+ FailedAt *[]uint64 `codec:"failed-at,omitempty"`
+ AppBudgetAdded *uint64 `codec:"app-budget-added,omitempty"`
+ AppBudgetConsumed *uint64 `codec:"app-budget-consumed,omitempty"`
}
-// preEncodedSimulateTxnGroupResult mirrors model.SimulateTransactionGroupResult
-type preEncodedSimulateTxnGroupResult struct {
- Txns []preEncodedSimulateTxnResult `codec:"txn-results"`
- FailureMessage *string `codec:"failure-message,omitempty"`
- FailedAt *[]uint64 `codec:"failed-at,omitempty"`
+// PreEncodedSimulateResponse mirrors model.SimulateResponse
+type PreEncodedSimulateResponse struct {
+ Version uint64 `codec:"version"`
+ LastRound uint64 `codec:"last-round"`
+ TxnGroups []PreEncodedSimulateTxnGroupResult `codec:"txn-groups"`
+ EvalOverrides *model.SimulationEvalOverrides `codec:"eval-overrides,omitempty"`
}
-// preEncodedSimulateResponse mirrors model.SimulateResponse
-type preEncodedSimulateResponse struct {
- Version uint64 `codec:"version"`
- LastRound uint64 `codec:"last-round"`
- TxnGroups []preEncodedSimulateTxnGroupResult `codec:"txn-groups"`
- WouldSucceed bool `codec:"would-succeed"`
+// PreEncodedSimulateRequestTransactionGroup mirrors model.SimulateRequestTransactionGroup
+type PreEncodedSimulateRequestTransactionGroup struct {
+ Txns []transactions.SignedTxn `codec:"txns"`
+}
+
+// PreEncodedSimulateRequest mirrors model.SimulateRequest
+type PreEncodedSimulateRequest struct {
+ TxnGroups []PreEncodedSimulateRequestTransactionGroup `codec:"txn-groups"`
+ AllowEmptySignatures bool `codec:"allow-empty-signatures,omitempty"`
+ AllowMoreLogging bool `codec:"allow-more-logging,omitempty"`
+ ExtraOpcodeBudget uint64 `codec:"extra-opcode-budget,omitempty"`
}
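The four PreEncoded* types above are hand-rolled mirrors of the generated models so the handler can decode msgpack bodies with canonical codec tags. A sketch of building a one-group request body the way the tests later in this diff do; the zero-valued transaction and the external import of the v2 package are purely illustrative:

    package main

    import (
        "fmt"

        v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
        "github.com/algorand/go-algorand/data/transactions"
        "github.com/algorand/go-algorand/protocol"
    )

    func main() {
        var stxn transactions.SignedTxn // a real caller signs one first
        request := v2.PreEncodedSimulateRequest{
            TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
                {Txns: []transactions.SignedTxn{stxn}},
            },
        }
        body := protocol.EncodeReflect(&request) // msgpack, the handler's first decode attempt
        fmt.Println(len(body))
    }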
// SimulateTransaction simulates broadcasting a raw transaction to the network, returning relevant simulation results.
@@ -952,15 +977,38 @@ func (v2 *Handlers) SimulateTransaction(ctx echo.Context, params model.SimulateT
}
proto := config.Consensus[stat.LastVersion]
- txgroup, err := decodeTxGroup(ctx.Request().Body, proto.MaxTxGroupSize)
+ requestBuffer := new(bytes.Buffer)
+ requestBodyReader := http.MaxBytesReader(nil, ctx.Request().Body, MaxTealDryrunBytes)
+ _, err = requestBuffer.ReadFrom(requestBodyReader)
if err != nil {
return badRequest(ctx, err, err.Error(), v2.Log)
}
+ requestData := requestBuffer.Bytes()
+
+ var simulateRequest PreEncodedSimulateRequest
+ err = decode(protocol.CodecHandle, requestData, &simulateRequest)
+ if err != nil {
+ err = decode(protocol.JSONStrictHandle, requestData, &simulateRequest)
+ if err != nil {
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+ }
+
+ for _, txgroup := range simulateRequest.TxnGroups {
+ if len(txgroup.Txns) == 0 {
+ err = errors.New("empty txgroup")
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+ if len(txgroup.Txns) > proto.MaxTxGroupSize {
+ err = fmt.Errorf("transaction group size %d exceeds protocol max %d", len(txgroup.Txns), proto.MaxTxGroupSize)
+ return badRequest(ctx, err, err.Error(), v2.Log)
+ }
+ }
// Simulate transaction
- simulationResult, err := v2.Node.Simulate(txgroup)
+ simulationResult, err := v2.Node.Simulate(convertSimulationRequest(simulateRequest))
if err != nil {
- var invalidTxErr simulation.InvalidTxGroupError
+ var invalidTxErr simulation.InvalidRequestError
switch {
case errors.As(err, &invalidTxErr):
return badRequest(ctx, invalidTxErr, invalidTxErr.Error(), v2.Log)
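The handler now accepts either encoding: it tries canonical msgpack first, falls back to strict JSON before rejecting the body, and then validates each group's size. A standalone sketch of that decode pattern, assuming go-codec and the exported protocol handles behave as they do elsewhere in this repository:

    package main

    import (
        "fmt"

        "github.com/algorand/go-algorand/protocol"
        "github.com/algorand/go-codec/codec"
    )

    // decodeEither mirrors the fallback above: msgpack first, then strict JSON.
    func decodeEither(data []byte, out interface{}) error {
        if err := codec.NewDecoderBytes(data, protocol.CodecHandle).Decode(out); err == nil {
            return nil
        }
        return codec.NewDecoderBytes(data, protocol.JSONStrictHandle).Decode(out)
    }

    func main() {
        var v map[string]uint64
        err := decodeEither([]byte(`{"budget":700}`), &v) // not msgpack, so JSON wins
        fmt.Println(err, v)
    }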
@@ -975,12 +1023,12 @@ func (v2 *Handlers) SimulateTransaction(ctx echo.Context, params model.SimulateT
if err != nil {
return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log)
}
- data, err := encode(handle, &response)
+ responseData, err := encode(handle, &response)
if err != nil {
return internalError(ctx, err, errFailedToEncodeResponse, v2.Log)
}
- return ctx.Blob(http.StatusOK, contentType, data)
+ return ctx.Blob(http.StatusOK, contentType, responseData)
}
// TealDryrun takes transactions and additional simulated ledger state and returns debugging information.
@@ -991,7 +1039,7 @@ func (v2 *Handlers) TealDryrun(ctx echo.Context) error {
}
req := ctx.Request()
buf := new(bytes.Buffer)
- req.Body = http.MaxBytesReader(nil, req.Body, maxTealDryrunBytes)
+ req.Body = http.MaxBytesReader(nil, req.Body, MaxTealDryrunBytes)
_, err := buf.ReadFrom(ctx.Request().Body)
if err != nil {
return badRequest(ctx, err, err.Error(), v2.Log)
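Renaming maxTealDryrunBytes to the exported MaxTealDryrunBytes (and likewise MaxTealSourceBytes below) lets the tests construct oversized bodies. A small sketch of how http.MaxBytesReader surfaces the limit, with a toy cap standing in for the real constant:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "net/http"
    )

    func main() {
        const limit = 8 // stand-in for MaxTealDryrunBytes
        body := io.NopCloser(bytes.NewReader(make([]byte, limit+1)))
        capped := http.MaxBytesReader(nil, body, limit) // nil ResponseWriter, as in the handler
        _, err := new(bytes.Buffer).ReadFrom(capped)
        fmt.Println(err) // "http: request body too large", the string the tests assert
    }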
@@ -1093,7 +1141,7 @@ func (v2 *Handlers) GetLedgerStateDelta(ctx echo.Context, round uint64, params m
}
sDelta, err := v2.Node.LedgerForAPI().GetStateDeltaForRound(basics.Round(round))
if err != nil {
- return notFound(ctx, err, errFailedRetrievingStateDelta, v2.Log)
+ return notFound(ctx, err, fmt.Sprintf(errFailedRetrievingStateDelta, err), v2.Log)
}
data, err := encode(handle, sDelta)
if err != nil {
@@ -1303,8 +1351,6 @@ func (v2 *Handlers) startCatchup(ctx echo.Context, catchpoint string) error {
code = http.StatusOK
case *node.CatchpointUnableToStartError:
return badRequest(ctx, err, err.Error(), v2.Log)
- case *node.CatchpointSyncRoundFailure:
- return badRequest(ctx, err, fmt.Sprintf(errFailedToStartCatchup, err), v2.Log)
default:
return internalError(ctx, err, fmt.Sprintf(errFailedToStartCatchup, err), v2.Log)
}
@@ -1451,6 +1497,7 @@ func (v2 *Handlers) GetApplicationBoxByName(ctx echo.Context, applicationID uint
}
response := model.BoxResponse{
+ Round: uint64(lastRound),
Name: boxName,
Value: value,
}
@@ -1524,7 +1571,7 @@ func (v2 *Handlers) TealCompile(ctx echo.Context, params model.TealCompileParams
}
buf := new(bytes.Buffer)
- ctx.Request().Body = http.MaxBytesReader(nil, ctx.Request().Body, maxTealSourceBytes)
+ ctx.Request().Body = http.MaxBytesReader(nil, ctx.Request().Body, MaxTealSourceBytes)
_, err = buf.ReadFrom(ctx.Request().Body)
if err != nil {
return badRequest(ctx, err, err.Error(), v2.Log)
@@ -1641,7 +1688,7 @@ func (v2 *Handlers) TealDisassemble(ctx echo.Context) error {
return ctx.String(http.StatusNotFound, "/teal/disassemble was not enabled in the configuration file by setting the EnableDeveloperAPI to true")
}
buf := new(bytes.Buffer)
- ctx.Request().Body = http.MaxBytesReader(nil, ctx.Request().Body, maxTealSourceBytes)
+ ctx.Request().Body = http.MaxBytesReader(nil, ctx.Request().Body, MaxTealSourceBytes)
_, err := buf.ReadFrom(ctx.Request().Body)
if err != nil {
return badRequest(ctx, err, err.Error(), v2.Log)
@@ -1657,7 +1704,90 @@ func (v2 *Handlers) TealDisassemble(ctx echo.Context) error {
return ctx.JSON(http.StatusOK, response)
}
+// GetLedgerStateDeltaForTransactionGroup retrieves the delta for a specified transaction group.
+// (GET /v2/deltas/txn/group/{id})
+func (v2 *Handlers) GetLedgerStateDeltaForTransactionGroup(ctx echo.Context, id string, params model.GetLedgerStateDeltaForTransactionGroupParams) error {
+ handle, contentType, err := getCodecHandle((*string)(params.Format))
+ if err != nil {
+ return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log)
+ }
+ idDigest, err := crypto.DigestFromString(id)
+ if err != nil {
+ return badRequest(ctx, err, errNoValidTxnSpecified, v2.Log)
+ }
+ tracer, ok := v2.Node.LedgerForAPI().GetTracer().(*eval.TxnGroupDeltaTracer)
+ if !ok {
+ return notImplemented(ctx, err, errFailedRetrievingTracer, v2.Log)
+ }
+ delta, err := tracer.GetDeltaForID(idDigest)
+ if err != nil {
+ return notFound(ctx, err, fmt.Sprintf(errFailedRetrievingStateDelta, err), v2.Log)
+ }
+ data, err := encode(handle, delta)
+ if err != nil {
+ return internalError(ctx, err, errFailedToEncodeResponse, v2.Log)
+ }
+ return ctx.Blob(http.StatusOK, contentType, data)
+}
+
+// GetTransactionGroupLedgerStateDeltasForRound retrieves the deltas for transaction groups in a given round.
+// (GET /v2/deltas/{round}/txn/group)
+func (v2 *Handlers) GetTransactionGroupLedgerStateDeltasForRound(ctx echo.Context, round uint64, params model.GetTransactionGroupLedgerStateDeltasForRoundParams) error {
+ handle, contentType, err := getCodecHandle((*string)(params.Format))
+ if err != nil {
+ return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log)
+ }
+ tracer, ok := v2.Node.LedgerForAPI().GetTracer().(*eval.TxnGroupDeltaTracer)
+ if !ok {
+ return notImplemented(ctx, err, errFailedRetrievingTracer, v2.Log)
+ }
+ deltas, err := tracer.GetDeltasForRound(basics.Round(round))
+ if err != nil {
+ return notFound(ctx, err, fmt.Sprintf(errFailedRetrievingStateDelta, err), v2.Log)
+ }
+ response := struct {
+ Deltas []eval.TxnGroupDeltaWithIds
+ }{
+ Deltas: deltas,
+ }
+ data, err := encode(handle, response)
+ if err != nil {
+ return internalError(ctx, err, errFailedToEncodeResponse, v2.Log)
+ }
+ return ctx.Blob(http.StatusOK, contentType, data)
+}
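A sketch of exercising the two new delta endpoints over HTTP, assuming a local node address and token value; note that both return 501 (notImplemented) unless the node's ledger was started with the transaction-group delta tracer enabled:

    package main

    import (
        "fmt"
        "io"
        "net/http"
    )

    func get(path string) {
        // Assumed local address and token contents; adjust for your node.
        req, _ := http.NewRequest(http.MethodGet, "http://localhost:8080"+path, nil)
        req.Header.Set("X-Algo-API-Token", "contents-of-algod.token")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            fmt.Println(err)
            return
        }
        defer resp.Body.Close()
        data, _ := io.ReadAll(resp.Body)
        fmt.Println(resp.StatusCode, len(data))
    }

    func main() {
        get("/v2/deltas/txn/group/REPLACE-WITH-TXID-OR-GROUP-ID?format=json")
        get("/v2/deltas/1234/txn/group?format=json")
    }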
+
// ExperimentalCheck is only available when EnabledExperimentalAPI is true
func (v2 *Handlers) ExperimentalCheck(ctx echo.Context) error {
return ctx.JSON(http.StatusOK, true)
}
+
+// GetBlockTimeStampOffset gets the timestamp offset.
+// This is only available in dev mode.
+// (GET /v2/devmode/blocks/offset)
+func (v2 *Handlers) GetBlockTimeStampOffset(ctx echo.Context) error {
+ offset, err := v2.Node.GetBlockTimeStampOffset()
+ if err != nil {
+ err = fmt.Errorf("cannot get block timestamp offset because we are not in dev mode")
+ return badRequest(ctx, err, fmt.Sprintf(errFailedRetrievingTimeStampOffset, err), v2.Log)
+ } else if offset == nil {
+ err = fmt.Errorf("block timestamp offset was never set, using real clock for timestamps")
+ return notFound(ctx, err, fmt.Sprintf(errFailedRetrievingTimeStampOffset, err), v2.Log)
+ }
+ return ctx.JSON(http.StatusOK, model.GetBlockTimeStampOffsetResponse{Offset: uint64(*offset)})
+}
+
+// SetBlockTimeStampOffset sets the timestamp offset.
+// This is only available in dev mode.
+// (POST /v2/devmode/blocks/offset/{offset})
+func (v2 *Handlers) SetBlockTimeStampOffset(ctx echo.Context, offset uint64) error {
+ if offset > math.MaxInt64 {
+ err := fmt.Errorf("block timestamp offset cannot be larger than max int64 value")
+ return badRequest(ctx, err, fmt.Sprintf(errFailedSettingTimeStampOffset, err), v2.Log)
+ }
+ err := v2.Node.SetBlockTimeStampOffset(int64(offset))
+ if err != nil {
+ return badRequest(ctx, err, fmt.Sprintf(errFailedSettingTimeStampOffset, err), v2.Log)
+ }
+ return ctx.NoContent(http.StatusOK)
+}
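The two dev-mode handlers pair naturally: POST an offset, then GET it back. A sketch against an assumed local dev-mode node; outside dev mode both requests return 400, as the tests below verify:

    package main

    import (
        "fmt"
        "net/http"
    )

    func call(method, path string) {
        req, _ := http.NewRequest(method, "http://localhost:8080"+path, nil)
        req.Header.Set("X-Algo-API-Token", "contents-of-algod.token") // assumed token
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            fmt.Println(err)
            return
        }
        resp.Body.Close()
        fmt.Println(method, path, resp.StatusCode)
    }

    func main() {
        call(http.MethodPost, "/v2/devmode/blocks/offset/3600") // stamp future blocks +1h
        call(http.MethodGet, "/v2/devmode/blocks/offset")       // expect 200 with {"offset":3600}
    }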
diff --git a/daemon/algod/api/server/v2/handlers_test.go b/daemon/algod/api/server/v2/handlers_test.go
index 42570db4c..42de7293f 100644
--- a/daemon/algod/api/server/v2/handlers_test.go
+++ b/daemon/algod/api/server/v2/handlers_test.go
@@ -178,7 +178,7 @@ func TestPendingTransactionResponseStruct(t *testing.T) {
generatedResponseGraph.AssertEquals(t, customResponseGraph)
}
-// TestSimulateResponseStruct ensures that the hand-written preEncodedSimulateResponse has the same
+// TestSimulateResponseStruct ensures that the hand-written PreEncodedSimulateResponse has the same
// encoding structure as the generated model.SimulateResponse
func TestSimulateResponseStruct(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -187,7 +187,7 @@ func TestSimulateResponseStruct(t *testing.T) {
generatedResponseType := reflect.TypeOf(model.SimulateResponse{})
generatedResponseGraph := makeTagGraph(generatedResponseType, make(map[reflect.Type]*tagNode))
- customResponseType := reflect.TypeOf(preEncodedSimulateResponse{})
+ customResponseType := reflect.TypeOf(PreEncodedSimulateResponse{})
customResponseGraph := makeTagGraph(customResponseType, make(map[reflect.Type]*tagNode))
expectedGeneratedTxnGraph := map[string]*tagNode{
@@ -207,3 +207,32 @@ func TestSimulateResponseStruct(t *testing.T) {
generatedResponseGraph.AssertEquals(t, customResponseGraph)
}
+
+// TestSimulateRequestStruct ensures that the hand-written PreEncodedSimulateRequest has the same
+// encoding structure as the generated model.SimulateRequest
+func TestSimulateRequestStruct(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ generatedResponseType := reflect.TypeOf(model.SimulateRequest{})
+ generatedResponseGraph := makeTagGraph(generatedResponseType, make(map[reflect.Type]*tagNode))
+
+ customResponseType := reflect.TypeOf(PreEncodedSimulateRequest{})
+ customResponseGraph := makeTagGraph(customResponseType, make(map[reflect.Type]*tagNode))
+
+ expectedGeneratedTxnGraph := map[string]*tagNode{
+ "<value>": {children: make(map[string]*tagNode)},
+ }
+ preEncodedTxPath := func(graph *tagNode) *tagNode {
+ // Resolve the field model.SimulateRequest{}.TxnGroups[0].Txns[0]
+ return graph.children["txn-groups"].children["<value>"].children["txns"].children["<value>"]
+ }
+ if assert.Equal(t, expectedGeneratedTxnGraph, preEncodedTxPath(generatedResponseGraph).children) {
+ // The generated type uses json.RawMessage to represent a transaction, while the
+ // custom type uses transactions.SignedTxn. Copy the custom subtree into the generated
+ // graph so the equality check below compares the remaining structure.
+ preEncodedTxPath(generatedResponseGraph).children = preEncodedTxPath(customResponseGraph).children
+ }
+
+ generatedResponseGraph.AssertEquals(t, customResponseGraph)
+}
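makeTagGraph compares the codec-tag shape of the hand-written and generated types. A toy illustration of the underlying idea (reading codec tags via reflection); this is not the repo's helper, just a sketch:

    package main

    import (
        "fmt"
        "reflect"
    )

    // tags lists a struct's codec keys one level deep.
    func tags(t reflect.Type) []string {
        var out []string
        for i := 0; i < t.NumField(); i++ {
            if tag, ok := t.Field(i).Tag.Lookup("codec"); ok {
                out = append(out, tag)
            }
        }
        return out
    }

    type example struct {
        Version   uint64 `codec:"version"`
        LastRound uint64 `codec:"last-round"`
    }

    func main() {
        fmt.Println(tags(reflect.TypeOf(example{}))) // [version last-round]
    }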
diff --git a/daemon/algod/api/server/v2/test/handlers_resources_test.go b/daemon/algod/api/server/v2/test/handlers_resources_test.go
index bef029100..1de86ddc1 100644
--- a/daemon/algod/api/server/v2/test/handlers_resources_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_resources_test.go
@@ -19,6 +19,7 @@ package test
import (
"encoding/json"
"fmt"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"net/http"
"net/http/httptest"
"testing"
@@ -48,6 +49,11 @@ type mockLedger struct {
kvstore map[string][]byte
latest basics.Round
blocks []bookkeeping.Block
+ tracer logic.EvalTracer
+}
+
+func (l *mockLedger) GetTracer() logic.EvalTracer {
+ return l.tracer
}
func (l *mockLedger) GetStateDeltaForRound(rnd basics.Round) (ledgercore.StateDelta, error) {
@@ -229,7 +235,7 @@ func setupTestForLargeResources(t *testing.T, acctSize, maxResults int, accountM
acctData = accountMaker(acctSize)
ml.accounts[fakeAddr] = acctData
- mockNode := makeMockNode(&ml, t.Name(), nil, false)
+ mockNode := makeMockNode(&ml, t.Name(), nil, cannedStatusReportGolden, false)
mockNode.config.MaxAPIResourcesPerAccount = uint64(maxResults)
dummyShutdownChan := make(chan struct{})
handlers = v2.Handlers{
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index 0c7c5135c..e30169b4a 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -23,11 +23,16 @@ import (
"errors"
"fmt"
"io"
+ "math"
"net/http"
"net/http/httptest"
+ "strings"
"testing"
"time"
+ "github.com/algorand/go-algorand/ledger/eval"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+
"github.com/labstack/echo/v4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
@@ -61,14 +66,14 @@ import (
"github.com/algorand/go-algorand/util/execpool"
)
-const stateProofIntervalForHandlerTests = uint64(256)
+const stateProofInterval = uint64(256)
-func setupTestForMethodGet(t *testing.T, consensusUpgrade bool) (v2.Handlers, echo.Context, *httptest.ResponseRecorder, []account.Root, []transactions.SignedTxn, func()) {
+func setupMockNodeForMethodGet(t *testing.T, status node.StatusReport, devmode bool) (v2.Handlers, echo.Context, *httptest.ResponseRecorder, []account.Root, []transactions.SignedTxn, func()) {
numAccounts := 1
numTransactions := 1
offlineAccounts := true
mockLedger, rootkeys, _, stxns, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts)
- mockNode := makeMockNode(mockLedger, t.Name(), nil, consensusUpgrade)
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, status, devmode)
dummyShutdownChan := make(chan struct{})
handler := v2.Handlers{
Node: mockNode,
@@ -82,17 +87,28 @@ func setupTestForMethodGet(t *testing.T, consensusUpgrade bool) (v2.Handlers, ec
return handler, c, rec, rootkeys, stxns, releasefunc
}
+func setupTestForMethodGet(t *testing.T, status node.StatusReport) (v2.Handlers, echo.Context, *httptest.ResponseRecorder, []account.Root, []transactions.SignedTxn, func()) {
+ return setupMockNodeForMethodGet(t, status, false)
+}
+
+func numOrNil(n uint64) *uint64 {
+ if n == 0 {
+ return nil
+ }
+ return &n
+}
+
func TestSimpleMockBuilding(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- handler, _, _, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, _, _, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
require.Equal(t, t.Name(), handler.Node.GenesisID())
}
func accountInformationTest(t *testing.T, address string, expectedCode int) {
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
err := handler.AccountInformation(c, address, model.AccountInformationParams{})
require.NoError(t, err)
@@ -115,7 +131,7 @@ func TestAccountInformation(t *testing.T) {
}
func getBlockTest(t *testing.T, blockNum uint64, format string, expectedCode int) {
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
err := handler.GetBlock(c, blockNum, model.GetBlockParams{Format: (*model.GetBlockParamsFormat)(&format)})
require.NoError(t, err)
@@ -134,7 +150,7 @@ func TestGetBlock(t *testing.T) {
}
func testGetLedgerStateDelta(t *testing.T, round uint64, format string, expectedCode int) {
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
insertRounds(require.New(t), handler, 3)
err := handler.GetLedgerStateDelta(c, round, model.GetLedgerStateDeltaParams{Format: (*model.GetLedgerStateDeltaParamsFormat)(&format)})
@@ -179,7 +195,7 @@ func TestSyncRound(t *testing.T) {
numTransactions := 1
offlineAccounts := true
mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts)
- mockNode := makeMockNode(mockLedger, t.Name(), nil, false)
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, cannedStatusReportGolden, false)
dummyShutdownChan := make(chan struct{})
handler := v2.Handlers{
Node: mockNode,
@@ -234,13 +250,12 @@ func TestSyncRound(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 200, rec.Code)
mockCall.Unset()
- c, rec = newReq(t)
mock.AssertExpectationsForObjects(t, mockNode)
}
func addBlockHelper(t *testing.T) (v2.Handlers, echo.Context, *httptest.ResponseRecorder, transactions.SignedTxn, func()) {
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
l := handler.Node.LedgerForAPI()
@@ -324,7 +339,7 @@ func TestGetBlockHash(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
err := handler.GetBlockHash(c, 0)
@@ -342,7 +357,7 @@ func TestGetBlockGetBlockHash(t *testing.T) {
t.Parallel()
a := require.New(t)
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
insertRounds(a, handler, 2)
@@ -414,7 +429,7 @@ func TestGetSupply(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- handler, c, _, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, _, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
err := handler.GetSupply(c)
require.NoError(t, err)
@@ -424,7 +439,7 @@ func TestGetStatus(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
err := handler.GetStatus(c)
require.NoError(t, err)
@@ -455,17 +470,72 @@ func TestGetStatus(t *testing.T) {
require.Equal(t, expectedResult, actualResult)
}
+func TestGetStatusConsensusUpgradeUnderflow(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Setup status report with unanimous YES votes.
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ currentRound := basics.Round(1000000)
+ stat := node.StatusReport{
+ LastRound: currentRound - 1,
+ LastVersion: protocol.ConsensusCurrentVersion,
+ NextVersion: protocol.ConsensusCurrentVersion,
+ UpgradePropose: "upgradePropose",
+ NextProtocolVoteBefore: currentRound,
+ NextProtocolApprovals: proto.UpgradeVoteRounds,
+ }
+
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, stat)
+ defer releasefunc()
+ err := handler.GetStatus(c)
+ require.NoError(t, err)
+ actualResult := model.NodeStatusResponse{}
+ err = protocol.DecodeJSON(rec.Body.Bytes(), &actualResult)
+ require.NoError(t, err)
+
+ // Make sure the votes are all yes, and 0 no.
+ require.Equal(t, uint64(0), *actualResult.UpgradeNoVotes)
+ require.Equal(t, proto.UpgradeVoteRounds, *actualResult.UpgradeYesVotes)
+ require.Equal(t, proto.UpgradeVoteRounds, *actualResult.UpgradeVotes)
+ require.Equal(t, proto.UpgradeThreshold, *actualResult.UpgradeVotesRequired)
+}
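This regression test pins the unanimous-approval edge case that used to underflow. A worked sketch of the old versus new arithmetic, with illustrative numbers in place of real consensus values:

    package main

    import "fmt"

    func main() {
        const upgradeVoteRounds = uint64(10000) // illustrative only
        lastRound := uint64(999999)
        voteBefore := uint64(1000000)
        approvals := upgradeVoteRounds // every round so far voted YES

        // Old arithmetic: no -1 for the exclusive VoteBefore bound.
        oldVotes := upgradeVoteRounds - (voteBefore - lastRound) // 9999
        fmt.Println(oldVotes - approvals)                        // wraps to 18446744073709551615

        // Fixed arithmetic from the GetStatus hunk earlier in this diff.
        newVotes := upgradeVoteRounds - (voteBefore - lastRound - 1) // 10000
        fmt.Println(newVotes - approvals)                            // 0
    }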
+
func TestGetStatusConsensusUpgrade(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, true)
+ cannedStatusReportConsensusUpgradeGolden := node.StatusReport{
+ LastRound: basics.Round(97000),
+ LastVersion: protocol.ConsensusCurrentVersion,
+ NextVersion: protocol.ConsensusCurrentVersion,
+ NextVersionRound: 200000,
+ NextVersionSupported: true,
+ StoppedAtUnsupportedRound: true,
+ Catchpoint: "",
+ CatchpointCatchupAcquiredBlocks: 0,
+ CatchpointCatchupProcessedAccounts: 0,
+ CatchpointCatchupVerifiedAccounts: 0,
+ CatchpointCatchupTotalAccounts: 0,
+ CatchpointCatchupTotalKVs: 0,
+ CatchpointCatchupProcessedKVs: 0,
+ CatchpointCatchupVerifiedKVs: 0,
+ CatchpointCatchupTotalBlocks: 0,
+ LastCatchpoint: "",
+ UpgradePropose: "upgradePropose",
+ UpgradeApprove: false,
+ UpgradeDelay: 0,
+ NextProtocolVoteBefore: 100000,
+ NextProtocolApprovals: 5000,
+ }
+
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportConsensusUpgradeGolden)
defer releasefunc()
err := handler.GetStatus(c)
require.NoError(t, err)
stat := cannedStatusReportConsensusUpgradeGolden
consensus := config.Consensus[protocol.ConsensusCurrentVersion]
- votesToGo := uint64(stat.NextProtocolVoteBefore) - uint64(stat.LastRound)
+ votesToGo := uint64(stat.NextProtocolVoteBefore) - uint64(stat.LastRound) - 1
nextProtocolVoteBefore := uint64(stat.NextProtocolVoteBefore)
votes := uint64(consensus.UpgradeVoteRounds) - votesToGo
votesNo := votes - stat.NextProtocolApprovals
@@ -508,7 +578,7 @@ func TestGetStatusAfterBlock(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
err := handler.WaitForBlock(c, 0)
require.NoError(t, err)
@@ -521,7 +591,7 @@ func TestGetTransactionParams(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
err := handler.TransactionParams(c)
require.NoError(t, err)
@@ -529,7 +599,7 @@ func TestGetTransactionParams(t *testing.T) {
}
func pendingTransactionInformationTest(t *testing.T, txidToUse int, format string, expectedCode int) {
- handler, c, rec, _, stxns, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, stxns, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
txid := "bad txid"
if txidToUse >= 0 {
@@ -552,7 +622,7 @@ func TestPendingTransactionInformation(t *testing.T) {
}
func getPendingTransactionsTest(t *testing.T, format string, max uint64, expectedCode int) {
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
params := model.GetPendingTransactionsParams{Format: (*model.GetPendingTransactionsParamsFormat)(&format), Max: &max}
err := handler.GetPendingTransactions(c, params)
@@ -631,7 +701,7 @@ func TestPendingTransactions(t *testing.T) {
}
func pendingTransactionsByAddressTest(t *testing.T, rootkeyToUse int, format string, expectedCode int) {
- handler, c, rec, rootkeys, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, rootkeys, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
address := "bad address"
if rootkeyToUse >= 0 {
@@ -653,13 +723,13 @@ func TestPendingTransactionsByAddress(t *testing.T) {
pendingTransactionsByAddressTest(t, -1, "json", 400)
}
-func prepareTransactionTest(t *testing.T, txnToUse, expectedCode int) (handler v2.Handlers, c echo.Context, rec *httptest.ResponseRecorder, releasefunc func()) {
+func prepareTransactionTest(t *testing.T, txnToUse int, txnPrep func(transactions.SignedTxn) []byte) (handler v2.Handlers, c echo.Context, rec *httptest.ResponseRecorder, releasefunc func()) {
numAccounts := 5
numTransactions := 5
offlineAccounts := true
mockLedger, _, _, stxns, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts)
dummyShutdownChan := make(chan struct{})
- mockNode := makeMockNode(mockLedger, t.Name(), nil, false)
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, cannedStatusReportGolden, false)
handler = v2.Handlers{
Node: mockNode,
@@ -670,7 +740,7 @@ func prepareTransactionTest(t *testing.T, txnToUse, expectedCode int) (handler v
var body io.Reader
if txnToUse >= 0 {
stxn := stxns[txnToUse]
- bodyBytes := protocol.Encode(&stxn)
+ bodyBytes := txnPrep(stxn)
body = bytes.NewReader(bodyBytes)
}
req := httptest.NewRequest(http.MethodPost, "/", body)
@@ -680,7 +750,10 @@ func prepareTransactionTest(t *testing.T, txnToUse, expectedCode int) (handler v
}
func postTransactionTest(t *testing.T, txnToUse, expectedCode int) {
- handler, c, rec, releasefunc := prepareTransactionTest(t, txnToUse, expectedCode)
+ txnPrep := func(stxn transactions.SignedTxn) []byte {
+ return protocol.Encode(&stxn)
+ }
+ handler, c, rec, releasefunc := prepareTransactionTest(t, txnToUse, txnPrep)
defer releasefunc()
err := handler.RawTransaction(c)
require.NoError(t, err)
@@ -696,7 +769,17 @@ func TestPostTransaction(t *testing.T) {
}
func simulateTransactionTest(t *testing.T, txnToUse int, format string, expectedCode int) {
- handler, c, rec, releasefunc := prepareTransactionTest(t, txnToUse, expectedCode)
+ txnPrep := func(stxn transactions.SignedTxn) []byte {
+ request := v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {
+ Txns: []transactions.SignedTxn{stxn},
+ },
+ },
+ }
+ return protocol.EncodeReflect(&request)
+ }
+ handler, c, rec, releasefunc := prepareTransactionTest(t, txnToUse, txnPrep)
defer releasefunc()
err := handler.SimulateTransaction(c, model.SimulateTransactionParams{Format: (*model.SimulateTransactionParamsFormat)(&format)})
require.NoError(t, err)
@@ -743,49 +826,35 @@ func TestPostSimulateTransaction(t *testing.T) {
}
}
-func copyInnerTxnGroupIDs(t *testing.T, dst, src *model.PendingTransactionResponse) {
+func copyInnerTxnGroupIDs(t *testing.T, dst, src *v2.PreEncodedTxInfo) {
t.Helper()
- // msgpack decodes to map[interface{}]interface{} while JSON decodes to map[string]interface{}
- txn := dst.Txn["txn"]
- switch dstTxnMap := txn.(type) {
- case map[string]interface{}:
- srcTxnMap := src.Txn["txn"].(map[string]interface{})
- groupID, hasGroupID := srcTxnMap["grp"]
- if hasGroupID {
- dstTxnMap["grp"] = groupID
- }
- case map[interface{}]interface{}:
- srcTxnMap := src.Txn["txn"].(map[interface{}]interface{})
- groupID, hasGroupID := srcTxnMap["grp"]
- if hasGroupID {
- dstTxnMap["grp"] = groupID
- }
+ if !src.Txn.Txn.Group.IsZero() {
+ dst.Txn.Txn.Group = src.Txn.Txn.Group
}
- if dst.InnerTxns == nil || src.InnerTxns == nil {
+ if dst.Inners == nil || src.Inners == nil {
return
}
- assert.Equal(t, len(*dst.InnerTxns), len(*src.InnerTxns))
+ assert.Equal(t, len(*dst.Inners), len(*src.Inners))
- for innerIndex := range *dst.InnerTxns {
- if innerIndex == len(*src.InnerTxns) {
+ for innerIndex := range *dst.Inners {
+ if innerIndex == len(*src.Inners) {
break
}
- dstInner := &(*dst.InnerTxns)[innerIndex]
- srcInner := &(*src.InnerTxns)[innerIndex]
+ dstInner := &(*dst.Inners)[innerIndex]
+ srcInner := &(*src.Inners)[innerIndex]
copyInnerTxnGroupIDs(t, dstInner, srcInner)
}
}
-func assertSimulationResultsEqual(t *testing.T, expectedError string, expected, actual model.SimulateResponse) {
+func assertSimulationResultsEqual(t *testing.T, expectedError string, expected, actual v2.PreEncodedSimulateResponse) {
t.Helper()
if len(expectedError) != 0 {
require.NotNil(t, actual.TxnGroups[0].FailureMessage)
require.Contains(t, *actual.TxnGroups[0].FailureMessage, expectedError)
- require.False(t, expected.WouldSucceed, "Test case WouldSucceed value is not consistent with expected failure")
// if it matched the expected error, copy the actual one so it will pass the equality check below
expected.TxnGroups[0].FailureMessage = actual.TxnGroups[0].FailureMessage
}
@@ -798,23 +867,23 @@ func assertSimulationResultsEqual(t *testing.T, expectedError string, expected,
}
expectedGroup := &expected.TxnGroups[groupIndex]
actualGroup := &actual.TxnGroups[groupIndex]
- assert.Equal(t, len(expectedGroup.TxnResults), len(actualGroup.TxnResults))
- for txnIndex := range expectedGroup.TxnResults {
- if txnIndex == len(actualGroup.TxnResults) {
+ assert.Equal(t, len(expectedGroup.Txns), len(actualGroup.Txns))
+ for txnIndex := range expectedGroup.Txns {
+ if txnIndex == len(actualGroup.Txns) {
break
}
- expectedTxn := &expectedGroup.TxnResults[txnIndex]
- actualTxn := &actualGroup.TxnResults[txnIndex]
- if expectedTxn.TxnResult.InnerTxns == nil || actualTxn.TxnResult.InnerTxns == nil {
+ expectedTxn := &expectedGroup.Txns[txnIndex]
+ actualTxn := &actualGroup.Txns[txnIndex]
+ if expectedTxn.Txn.Inners == nil || actualTxn.Txn.Inners == nil {
continue
}
- assert.Equal(t, len(*expectedTxn.TxnResult.InnerTxns), len(*actualTxn.TxnResult.InnerTxns))
- for innerIndex := range *expectedTxn.TxnResult.InnerTxns {
- if innerIndex == len(*actualTxn.TxnResult.InnerTxns) {
+ assert.Equal(t, len(*expectedTxn.Txn.Inners), len(*actualTxn.Txn.Inners))
+ for innerIndex := range *expectedTxn.Txn.Inners {
+ if innerIndex == len(*actualTxn.Txn.Inners) {
break
}
- expectedInner := &(*expectedTxn.TxnResult.InnerTxns)[innerIndex]
- actualInner := &(*actualTxn.TxnResult.InnerTxns)[innerIndex]
+ expectedInner := &(*expectedTxn.Txn.Inners)[innerIndex]
+ actualInner := &(*actualTxn.Txn.Inners)[innerIndex]
copyInnerTxnGroupIDs(t, expectedInner, actualInner)
}
}
@@ -823,20 +892,20 @@ func assertSimulationResultsEqual(t *testing.T, expectedError string, expected,
require.Equal(t, expected, actual)
}
-func makePendingTxnResponse(t *testing.T, txn transactions.SignedTxnWithAD, handle codec.Handle) model.PendingTransactionResponse {
+func makePendingTxnResponse(t *testing.T, txn transactions.SignedTxnWithAD) v2.PreEncodedTxInfo {
t.Helper()
preEncoded := v2.ConvertInnerTxn(&txn)
- // encode to bytes
- var encodedBytes []byte
- encoder := codec.NewEncoderBytes(&encodedBytes, handle)
- err := encoder.Encode(&preEncoded)
- require.NoError(t, err)
+ // In theory we could return preEncoded directly, but there appear to be subtle
+ // differences once the object is encoded and decoded, such as *uint64 fields pointing
+ // at 0 coming back as nil. To be safe, round-trip the object through the codec here too.
- // decode to model.PendingTransactionResponse
- var response model.PendingTransactionResponse
- decoder := codec.NewDecoderBytes(encodedBytes, handle)
- err = decoder.Decode(&response)
+ // Encode to bytes
+ encodedBytes := protocol.EncodeReflect(&preEncoded)
+
+ // Decode to v2.PreEncodedTxInfo
+ var response v2.PreEncodedTxInfo
+ err := protocol.DecodeReflect(encodedBytes, &response)
require.NoError(t, err)
return response
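The round trip matters because go-codec's omitempty drops a *uint64 that points at zero, so it decodes back as nil; a minimal sketch of that behavior, assuming the pointer-dereferencing omitempty semantics the comment above describes, using the same EncodeReflect/DecodeReflect helpers:

    package main

    import (
        "fmt"

        "github.com/algorand/go-algorand/protocol"
    )

    type payload struct {
        N *uint64 `codec:"n,omitempty"`
    }

    func main() {
        zero := uint64(0)
        in := payload{N: &zero}
        var out payload
        if err := protocol.DecodeReflect(protocol.EncodeReflect(&in), &out); err != nil {
            panic(err)
        }
        fmt.Println(out.N == nil) // true: &0 does not survive, hence the defensive round trip
    }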
@@ -852,7 +921,7 @@ func TestSimulateTransaction(t *testing.T) {
mockLedger, roots, _, _, releasefunc := testingenvWithBalances(t, 999_998, 999_999, numAccounts, 1, offlineAccounts)
defer releasefunc()
dummyShutdownChan := make(chan struct{})
- mockNode := makeMockNode(mockLedger, t.Name(), nil, false)
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, cannedStatusReportGolden, false)
handler := v2.Handlers{
Node: mockNode,
Log: logging.Base(),
@@ -868,7 +937,7 @@ func TestSimulateTransaction(t *testing.T) {
for name, scenarioFn := range scenarios {
t.Run(name, func(t *testing.T) { //nolint:paralleltest // Uses shared testing env
sender := roots[0]
- futureAppID := basics.AppIndex(2)
+ futureAppID := basics.AppIndex(1002)
payTxn := txnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
@@ -898,10 +967,14 @@ int 1`,
// build request body
var body io.Reader
- var bodyBytes []byte
- for _, stxn := range stxns {
- bodyBytes = append(bodyBytes, protocol.Encode(&stxn)...)
+ request := v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {
+ Txns: stxns,
+ },
+ },
}
+ bodyBytes := protocol.EncodeReflect(&request)
msgpackFormat := model.SimulateTransactionParamsFormatMsgpack
jsonFormat := model.SimulateTransactionParamsFormatJson
@@ -948,7 +1021,7 @@ int 1`,
require.Equal(t, 200, rec.Code, rec.Body.String())
// decode actual response
- var actualBody model.SimulateResponse
+ var actualBody v2.PreEncodedSimulateResponse
decoder := codec.NewDecoderBytes(rec.Body.Bytes(), responseFormat.handle)
err = decoder.Decode(&actualBody)
require.NoError(t, err)
@@ -960,28 +1033,38 @@ int 1`,
clone[0]++
expectedFailedAt = &clone
}
- expectedBody := model.SimulateResponse{
- Version: 1,
- TxnGroups: []model.SimulateTransactionGroupResult{
+
+ var txnAppBudgetUsed []*uint64
+ appBudgetAdded := numOrNil(scenario.AppBudgetAdded)
+ appBudgetConsumed := numOrNil(scenario.AppBudgetConsumed)
+ for i := range scenario.TxnAppBudgetConsumed {
+ txnAppBudgetUsed = append(txnAppBudgetUsed, numOrNil(scenario.TxnAppBudgetConsumed[i]))
+ }
+ expectedBody := v2.PreEncodedSimulateResponse{
+ Version: 2,
+ TxnGroups: []v2.PreEncodedSimulateTxnGroupResult{
{
- FailedAt: expectedFailedAt,
- TxnResults: []model.SimulateTransactionResult{
+ AppBudgetAdded: appBudgetAdded,
+ AppBudgetConsumed: appBudgetConsumed,
+ FailedAt: expectedFailedAt,
+ Txns: []v2.PreEncodedSimulateTxnResult{
{
- TxnResult: makePendingTxnResponse(t, transactions.SignedTxnWithAD{
+ Txn: makePendingTxnResponse(t, transactions.SignedTxnWithAD{
SignedTxn: stxns[0],
// expect no ApplyData info
- }, responseFormat.handle),
+ }),
+ AppBudgetConsumed: txnAppBudgetUsed[0],
},
{
- TxnResult: makePendingTxnResponse(t, transactions.SignedTxnWithAD{
+ Txn: makePendingTxnResponse(t, transactions.SignedTxnWithAD{
SignedTxn: stxns[1],
ApplyData: scenario.ExpectedSimulationAD,
- }, responseFormat.handle),
+ }),
+ AppBudgetConsumed: txnAppBudgetUsed[1],
},
},
},
},
- WouldSucceed: scenario.Outcome == mocktracer.ApprovalOutcome,
}
assertSimulationResultsEqual(t, scenario.ExpectedError, expectedBody, actualBody)
})
@@ -1000,7 +1083,7 @@ func TestSimulateTransactionVerificationFailure(t *testing.T) {
mockLedger, roots, _, _, releasefunc := testingenv(t, numAccounts, 1, offlineAccounts)
defer releasefunc()
dummyShutdownChan := make(chan struct{})
- mockNode := makeMockNode(mockLedger, t.Name(), nil, false)
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, cannedStatusReportGolden, false)
handler := v2.Handlers{
Node: mockNode,
Log: logging.Base(),
@@ -1040,6 +1123,73 @@ func TestSimulateTransactionVerificationFailure(t *testing.T) {
require.Equal(t, 400, rec.Code, rec.Body.String())
}
+func TestSimulateTransactionMultipleGroups(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // prepare node and handler
+ numAccounts := 5
+ offlineAccounts := true
+ mockLedger, roots, _, _, releasefunc := testingenv(t, numAccounts, 1, offlineAccounts)
+ defer releasefunc()
+ dummyShutdownChan := make(chan struct{})
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, cannedStatusReportGolden, false)
+ handler := v2.Handlers{
+ Node: mockNode,
+ Log: logging.Base(),
+ Shutdown: dummyShutdownChan,
+ }
+
+ hdr, err := mockLedger.BlockHdr(mockLedger.Latest())
+ require.NoError(t, err)
+ txnInfo := simulationtesting.TxnInfo{LatestHeader: hdr}
+
+ sender := roots[0]
+ receiver := roots[1]
+
+ txn1 := txnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Address(),
+ Receiver: receiver.Address(),
+ Amount: 1,
+ })
+ txn2 := txnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Address(),
+ Receiver: receiver.Address(),
+ Amount: 2,
+ })
+
+ stxn1 := txn1.Txn().Sign(sender.Secrets())
+ stxn2 := txn2.Txn().Sign(sender.Secrets())
+
+ // build request body
+ request := v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {
+ Txns: []transactions.SignedTxn{stxn1},
+ },
+ {
+ Txns: []transactions.SignedTxn{stxn2},
+ },
+ },
+ }
+ bodyBytes := protocol.EncodeReflect(&request)
+ body := bytes.NewReader(bodyBytes)
+ req := httptest.NewRequest(http.MethodPost, "/", body)
+ rec := httptest.NewRecorder()
+
+ e := echo.New()
+ c := e.NewContext(req, rec)
+
+ // simulate transaction
+ err = handler.SimulateTransaction(c, model.SimulateTransactionParams{})
+ require.NoError(t, err)
+ bodyString := rec.Body.String()
+ require.Equal(t, 400, rec.Code, bodyString)
+ require.Contains(t, bodyString, "expected 1 transaction group, got 2")
+}
+
func startCatchupTest(t *testing.T, catchpoint string, nodeError error, expectedCode int) {
numAccounts := 1
numTransactions := 1
@@ -1047,7 +1197,7 @@ func startCatchupTest(t *testing.T, catchpoint string, nodeError error, expected
mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts)
defer releasefunc()
dummyShutdownChan := make(chan struct{})
- mockNode := makeMockNode(mockLedger, t.Name(), nodeError, false)
+ mockNode := makeMockNode(mockLedger, t.Name(), nodeError, cannedStatusReportGolden, false)
handler := v2.Handlers{Node: mockNode, Log: logging.Base(), Shutdown: dummyShutdownChan}
e := echo.New()
req := httptest.NewRequest(http.MethodPost, "/", nil)
@@ -1075,10 +1225,6 @@ func TestStartCatchup(t *testing.T) {
badCatchPoint := "bad catchpoint"
startCatchupTest(t, badCatchPoint, nil, 400)
-
- // Test that a catchup fails w/ 400 when the catchpoint round is > syncRound (while syncRound is set)
- syncRoundError := node.MakeCatchpointSyncRoundFailure(goodCatchPoint, 1)
- startCatchupTest(t, goodCatchPoint, syncRoundError, 400)
}
func abortCatchupTest(t *testing.T, catchpoint string, expectedCode int) {
@@ -1088,7 +1234,7 @@ func abortCatchupTest(t *testing.T, catchpoint string, expectedCode int) {
mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts)
defer releasefunc()
dummyShutdownChan := make(chan struct{})
- mockNode := makeMockNode(mockLedger, t.Name(), nil, false)
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, cannedStatusReportGolden, false)
handler := v2.Handlers{
Node: mockNode,
Log: logging.Base(),
@@ -1123,7 +1269,7 @@ func tealCompileTest(t *testing.T, bytesToUse []byte, expectedCode int,
mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts)
defer releasefunc()
dummyShutdownChan := make(chan struct{})
- mockNode := makeMockNode(mockLedger, t.Name(), nil, false)
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, cannedStatusReportGolden, false)
mockNode.config.EnableDeveloperAPI = enableDeveloperAPI
handler := v2.Handlers{
Node: mockNode,
@@ -1194,7 +1340,7 @@ func tealDisassembleTest(t *testing.T, program []byte, expectedCode int,
mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts)
defer releasefunc()
dummyShutdownChan := make(chan struct{})
- mockNode := makeMockNode(mockLedger, t.Name(), nil, false)
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, cannedStatusReportGolden, false)
mockNode.config.EnableDeveloperAPI = enableDeveloperAPI
handler := v2.Handlers{
Node: mockNode,
@@ -1245,6 +1391,11 @@ func TestTealDisassemble(t *testing.T) {
// Test bad program.
badProgram := []byte{1, 99}
tealDisassembleTest(t, badProgram, 400, "invalid opcode", true)
+
+ // Create a program with MaxTealSourceBytes+1 bytes
+ // This should fail inside the handler when reading the bytes from the request body.
+ largeProgram := []byte(strings.Repeat("a", v2.MaxTealSourceBytes+1))
+ tealDisassembleTest(t, largeProgram, 400, "http: request body too large", true)
}
func tealDryrunTest(
@@ -1257,7 +1408,7 @@ func tealDryrunTest(
mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts)
defer releasefunc()
dummyShutdownChan := make(chan struct{})
- mockNode := makeMockNode(mockLedger, t.Name(), nil, false)
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, cannedStatusReportGolden, false)
mockNode.config.EnableDeveloperAPI = enableDeveloperAPI
handler := v2.Handlers{
Node: mockNode,
@@ -1291,6 +1442,12 @@ func tealDryrunTest(
messages := *response.Txns[0].AppCallMessages
require.GreaterOrEqual(t, len(messages), 1)
require.Equal(t, expResult, messages[len(messages)-1])
+ } else if rec.Code == 400 {
+ var response model.ErrorResponse
+ data := rec.Body.Bytes()
+ err = protocol.DecodeJSON(data, &response)
+ require.NoError(t, err, string(data))
+ require.Contains(t, response.Message, expResult)
}
return
}
@@ -1356,11 +1513,11 @@ func TestTealDryrun(t *testing.T) {
tealDryrunTest(t, &gdr, "msgp", 404, "", false)
gdr.ProtocolVersion = "unk"
- tealDryrunTest(t, &gdr, "json", 400, "", true)
+ tealDryrunTest(t, &gdr, "json", 400, "unsupported protocol version", true)
gdr.ProtocolVersion = ""
ddr := tealDryrunTest(t, &gdr, "json", 200, "PASS", true)
- require.Equal(t, string(protocol.ConsensusCurrentVersion), ddr.ProtocolVersion)
+ require.Equal(t, string(protocol.ConsensusFuture), ddr.ProtocolVersion)
gdr.ProtocolVersion = string(protocol.ConsensusFuture)
ddr = tealDryrunTest(t, &gdr, "json", 200, "PASS", true)
require.Equal(t, string(protocol.ConsensusFuture), ddr.ProtocolVersion)
@@ -1369,6 +1526,10 @@ func TestTealDryrun(t *testing.T) {
tealDryrunTest(t, &gdr, "json", 200, "REJECT", true)
tealDryrunTest(t, &gdr, "msgp", 200, "REJECT", true)
tealDryrunTest(t, &gdr, "json", 404, "", false)
+
+ // This should fail inside the handler when reading the bytes from the request body.
+ gdr.ProtocolVersion = strings.Repeat("a", v2.MaxTealDryrunBytes+1)
+ tealDryrunTest(t, &gdr, "json", 400, "http: request body too large", true)
}
func TestAppendParticipationKeys(t *testing.T) {
@@ -1376,7 +1537,7 @@ func TestAppendParticipationKeys(t *testing.T) {
mockLedger, _, _, _, releasefunc := testingenv(t, 1, 1, true)
defer releasefunc()
- mockNode := makeMockNode(mockLedger, t.Name(), nil, false)
+ mockNode := makeMockNode(mockLedger, t.Name(), nil, cannedStatusReportGolden, false)
handler := v2.Handlers{
Node: mockNode,
Log: logging.Base(),
@@ -1460,7 +1621,7 @@ func TestAppendParticipationKeys(t *testing.T) {
t.Run("Internal error", func(t *testing.T) {
// Create mock node with an error.
expectedErr := errors.New("expected error")
- mockNode := makeMockNode(mockLedger, t.Name(), expectedErr, false)
+ mockNode := makeMockNode(mockLedger, t.Name(), expectedErr, cannedStatusReportGolden, false)
handler := v2.Handlers{
Node: mockNode,
Log: logging.Base(),
@@ -1558,12 +1719,13 @@ func newEmptyBlock(a *require.Assertions, lastBlock bookkeeping.Block, genBlk bo
var blk bookkeeping.Block
blk.BlockHeader = bookkeeping.BlockHeader{
- GenesisID: genBlk.GenesisID(),
- GenesisHash: genBlk.GenesisHash(),
- Round: l.Latest() + 1,
- Branch: latestBlock.Hash(),
- RewardsState: latestBlock.NextRewardsState(l.Latest()+1, proto, poolBal.MicroAlgos, totalRewardUnits, logging.Base()),
- UpgradeState: latestBlock.UpgradeState,
+ GenesisID: genBlk.GenesisID(),
+ GenesisHash: genBlk.GenesisHash(),
+ Round: l.Latest() + 1,
+ Branch: latestBlock.Hash(),
+ RewardsState: latestBlock.NextRewardsState(l.Latest()+1, proto, poolBal.MicroAlgos, totalRewardUnits, logging.Base()),
+ UpgradeState: latestBlock.UpgradeState,
+ StateProofTracking: latestBlock.StateProofTracking,
}
blk.BlockHeader.TxnCounter = latestBlock.TxnCounter
@@ -1580,22 +1742,19 @@ func newEmptyBlock(a *require.Assertions, lastBlock bookkeeping.Block, genBlk bo
return blk
}
-func addStateProofIfNeeded(blk bookkeeping.Block) bookkeeping.Block {
+func addStateProof(blk bookkeeping.Block) bookkeeping.Block {
round := uint64(blk.Round())
- if round%stateProofIntervalForHandlerTests == (stateProofIntervalForHandlerTests/2+18) && round > stateProofIntervalForHandlerTests*2 {
- return blk
- }
- stateProofRound := (round - round%stateProofIntervalForHandlerTests) - stateProofIntervalForHandlerTests
+ stateProofRound := (round/stateProofInterval - 1) * stateProofInterval
tx := transactions.SignedTxn{
Txn: transactions.Transaction{
Type: protocol.StateProofTx,
- Header: transactions.Header{Sender: transactions.StateProofSender},
+ Header: transactions.Header{Sender: transactions.StateProofSender, FirstValid: blk.Round()},
StateProofTxnFields: transactions.StateProofTxnFields{
StateProofType: 0,
Message: stateproofmsg.Message{
BlockHeadersCommitment: []byte{0x0, 0x1, 0x2},
FirstAttestedRound: stateProofRound + 1,
- LastAttestedRound: stateProofRound + stateProofIntervalForHandlerTests,
+ LastAttestedRound: stateProofRound + stateProofInterval,
},
},
},
@@ -1603,19 +1762,39 @@ func addStateProofIfNeeded(blk bookkeeping.Block) bookkeeping.Block {
txnib := transactions.SignedTxnInBlock{SignedTxnWithAD: transactions.SignedTxnWithAD{SignedTxn: tx}}
blk.Payset = append(blk.Payset, txnib)
+ updatedStateProofTracking := bookkeeping.StateProofTrackingData{
+ StateProofVotersCommitment: blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment,
+ StateProofOnlineTotalWeight: blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight,
+ StateProofNextRound: blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofNextRound + basics.Round(stateProofInterval),
+ }
+ blk.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+ blk.BlockHeader.StateProofTracking[protocol.StateProofBasic] = updatedStateProofTracking
+
return blk
}
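The simplified stateProofRound arithmetic picks the start of the previous interval. A worked example with the tests' 256-round interval:

    package main

    import "fmt"

    func main() {
        const interval = uint64(256) // stateProofInterval in these tests
        round := uint64(658)         // a round where insertRounds attaches a proof
        stateProofRound := (round/interval - 1) * interval // 256
        // The proof attests rounds 257 through 512.
        fmt.Println(stateProofRound+1, stateProofRound+interval)
    }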
func insertRounds(a *require.Assertions, h v2.Handlers, numRounds int) {
ledger := h.Node.LedgerForAPI()
+ firstStateProof := basics.Round(stateProofInterval * 2)
genBlk, err := ledger.Block(0)
a.NoError(err)
+ genBlk.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+ genBlk.BlockHeader.StateProofTracking[protocol.StateProofBasic] = bookkeeping.StateProofTrackingData{
+ StateProofVotersCommitment: nil,
+ StateProofOnlineTotalWeight: basics.MicroAlgos{},
+ StateProofNextRound: firstStateProof,
+ }
lastBlk := genBlk
for i := 0; i < numRounds; i++ {
blk := newEmptyBlock(a, lastBlk, genBlk, ledger)
- blk = addStateProofIfNeeded(blk)
+ round := uint64(blk.Round())
+ // Add a StateProof transaction once half of the interval has passed (128 rounds), plus another 18 rounds for good measure
+ // The first StateProof covers rounds up to 2*Interval, since the first commitment cannot come from the genesis block
+ if blk.Round() > firstStateProof && (round%stateProofInterval == (stateProofInterval/2 + 18)) {
+ blk = addStateProof(blk)
+ }
blk.BlockHeader.CurrentProtocol = protocol.ConsensusCurrentVersion
a.NoError(ledger.(*data.Ledger).AddBlock(blk, agreement.Certificate{}))
lastBlk = blk
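Given the condition above, only a couple of rounds in a typical test run actually carry a state proof. A quick sketch enumerating them for insertRounds(1000):

    package main

    import "fmt"

    func main() {
        const interval = uint64(256)
        firstStateProof := 2 * interval // 512
        for round := uint64(1); round <= 1000; round++ {
            if round > firstStateProof && round%interval == interval/2+18 {
                fmt.Println(round) // 658 and 914
            }
        }
    }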
@@ -1626,7 +1805,7 @@ func TestStateProofNotFound(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
insertRounds(a, handler, 700)
@@ -1639,7 +1818,7 @@ func TestStateProofHigherRoundThanLatest(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
a.NoError(handler.GetStateProof(ctx, 2))
@@ -1650,12 +1829,12 @@ func TestStateProof200(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
insertRounds(a, handler, 1000)
- a.NoError(handler.GetStateProof(ctx, stateProofIntervalForHandlerTests+1))
+ a.NoError(handler.GetStateProof(ctx, stateProofInterval+1))
a.Equal(200, responseRecorder.Code)
stprfResp := model.StateProofResponse{}
@@ -1668,7 +1847,7 @@ func TestHeaderProofRoundTooHigh(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
a.NoError(handler.GetLightBlockHeaderProof(ctx, 2))
@@ -1679,7 +1858,7 @@ func TestHeaderProofStateProofNotFound(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
insertRounds(a, handler, 700)
@@ -1692,18 +1871,18 @@ func TestGetBlockProof200(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, ctx, responseRecorder, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
insertRounds(a, handler, 1000)
- a.NoError(handler.GetLightBlockHeaderProof(ctx, stateProofIntervalForHandlerTests*2+2))
+ a.NoError(handler.GetLightBlockHeaderProof(ctx, stateProofInterval*2+2))
a.Equal(200, responseRecorder.Code)
- blkHdrArr, err := stateproof.FetchLightHeaders(handler.Node.LedgerForAPI(), stateProofIntervalForHandlerTests, basics.Round(stateProofIntervalForHandlerTests*3))
+ blkHdrArr, err := stateproof.FetchLightHeaders(handler.Node.LedgerForAPI(), stateProofInterval, basics.Round(stateProofInterval*3))
a.NoError(err)
- leafproof, err := stateproof.GenerateProofOfLightBlockHeaders(stateProofIntervalForHandlerTests, blkHdrArr, 1)
+ leafproof, err := stateproof.GenerateProofOfLightBlockHeaders(stateProofInterval, blkHdrArr, 1)
a.NoError(err)
proofResp := model.LightBlockHeaderProofResponse{}
@@ -1725,27 +1904,27 @@ func TestStateproofTransactionForRound(t *testing.T) {
CurrentProtocol: protocol.ConsensusCurrentVersion,
},
}
- blk = addStateProofIfNeeded(blk)
+ blk = addStateProof(blk)
ledger.blocks = append(ledger.blocks, blk)
}
ctx, cncl := context.WithTimeout(context.Background(), time.Minute*2)
defer cncl()
- txn, err := v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(stateProofIntervalForHandlerTests*2+1), 1000, nil)
+ txn, err := v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(stateProofInterval*2+1), 1000, nil)
a.NoError(err)
- a.Equal(2*stateProofIntervalForHandlerTests+1, txn.Message.FirstAttestedRound)
- a.Equal(3*stateProofIntervalForHandlerTests, txn.Message.LastAttestedRound)
+ a.Equal(2*stateProofInterval+1, txn.Message.FirstAttestedRound)
+ a.Equal(3*stateProofInterval, txn.Message.LastAttestedRound)
a.Equal([]byte{0x0, 0x1, 0x2}, txn.Message.BlockHeadersCommitment)
- txn, err = v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(2*stateProofIntervalForHandlerTests), 1000, nil)
+ txn, err = v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(2*stateProofInterval), 1000, nil)
a.NoError(err)
- a.Equal(stateProofIntervalForHandlerTests+1, txn.Message.FirstAttestedRound)
- a.Equal(2*stateProofIntervalForHandlerTests, txn.Message.LastAttestedRound)
+ a.Equal(stateProofInterval+1, txn.Message.FirstAttestedRound)
+ a.Equal(2*stateProofInterval, txn.Message.LastAttestedRound)
txn, err = v2.GetStateProofTransactionForRound(ctx, &ledger, 999, 1000, nil)
a.ErrorIs(err, v2.ErrNoStateProofForRound)
- txn, err = v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(2*stateProofIntervalForHandlerTests), basics.Round(2*stateProofIntervalForHandlerTests), nil)
+ txn, err = v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(2*stateProofInterval), basics.Round(2*stateProofInterval), nil)
a.ErrorIs(err, v2.ErrNoStateProofForRound)
}
@@ -1762,12 +1941,12 @@ func TestStateproofTransactionForRoundWithoutStateproofs(t *testing.T) {
CurrentProtocol: protocol.ConsensusV30, // should have StateProofInterval == 0 .
},
}
- blk = addStateProofIfNeeded(blk)
+ blk = addStateProof(blk)
ledger.blocks = append(ledger.blocks, blk)
}
ctx, cncl := context.WithTimeout(context.Background(), time.Minute)
defer cncl()
- _, err := v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(stateProofIntervalForHandlerTests*2+1), 1000, nil)
+ _, err := v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(stateProofInterval*2+1), 1000, nil)
a.ErrorIs(err, v2.ErrNoStateProofForRound)
}
@@ -1784,13 +1963,13 @@ func TestStateproofTransactionForRoundTimeouts(t *testing.T) {
CurrentProtocol: protocol.ConsensusCurrentVersion, // should have StateProofInterval != 0 .
},
}
- blk = addStateProofIfNeeded(blk)
+ blk = addStateProof(blk)
ledger.blocks = append(ledger.blocks, blk)
}
ctx, cncl := context.WithTimeout(context.Background(), time.Nanosecond)
defer cncl()
- _, err := v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(stateProofIntervalForHandlerTests*2+1), 1000, nil)
+ _, err := v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(stateProofInterval*2+1), 1000, nil)
a.ErrorIs(err, v2.ErrTimeout)
}
@@ -1807,7 +1986,7 @@ func TestStateproofTransactionForRoundShutsDown(t *testing.T) {
CurrentProtocol: protocol.ConsensusCurrentVersion, // should have StateProofInterval != 0 .
},
}
- blk = addStateProofIfNeeded(blk)
+ blk = addStateProof(blk)
ledger.blocks = append(ledger.blocks, blk)
}
@@ -1815,7 +1994,7 @@ func TestStateproofTransactionForRoundShutsDown(t *testing.T) {
close(stoppedChan)
ctx, cncl := context.WithTimeout(context.Background(), time.Minute)
defer cncl()
- _, err := v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(stateProofIntervalForHandlerTests*2+1), 1000, stoppedChan)
+ _, err := v2.GetStateProofTransactionForRound(ctx, &ledger, basics.Round(stateProofInterval*2+1), 1000, stoppedChan)
a.ErrorIs(err, v2.ErrShutdown)
}
@@ -1823,7 +2002,7 @@ func TestExperimentalCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, false)
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
defer releasefunc()
// Since we are invoking the method directly, it doesn't matter if EnableExperimentalAPI is true.
@@ -1834,3 +2013,193 @@ func TestExperimentalCheck(t *testing.T) {
require.Equal(t, 200, rec.Code)
require.Equal(t, "true\n", string(rec.Body.Bytes()))
}
+
+func TestTimestampOffsetNotInDevMode(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden)
+ defer releasefunc()
+
+ // TestGetBlockTimeStampOffset 400 - offset is not set and mock node is
+ // not in dev mode
+ err := handler.GetBlockTimeStampOffset(c)
+ require.NoError(t, err)
+ require.Equal(t, 400, rec.Code)
+ require.Equal(t, "{\"message\":\"failed retrieving timestamp offset from node: cannot get block timestamp offset because we are not in dev mode\"}\n", rec.Body.String())
+ c, rec = newReq(t)
+
+ // TestSetBlockTimeStampOffset 400 - cannot set timestamp offset when not
+ // in dev mode
+ err = handler.SetBlockTimeStampOffset(c, 1)
+ require.NoError(t, err)
+ require.Equal(t, 400, rec.Code)
+ require.Equal(t, "{\"message\":\"failed to set timestamp offset on the node: cannot set block timestamp when not in dev mode\"}\n", rec.Body.String())
+}
+
+func TestTimestampOffsetInDevMode(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ handler, c, rec, _, _, releasefunc := setupMockNodeForMethodGet(t, cannedStatusReportGolden, true)
+ defer releasefunc()
+
+ // TestGetBlockTimeStampOffset 404
+ err := handler.GetBlockTimeStampOffset(c)
+ require.NoError(t, err)
+ require.Equal(t, 404, rec.Code)
+ require.Equal(t, "{\"message\":\"failed retrieving timestamp offset from node: block timestamp offset was never set, using real clock for timestamps\"}\n", rec.Body.String())
+ c, rec = newReq(t)
+
+ // TestSetBlockTimeStampOffset 200
+ err = handler.SetBlockTimeStampOffset(c, 1)
+ require.NoError(t, err)
+ require.Equal(t, 200, rec.Code)
+ c, rec = newReq(t)
+
+ // TestGetBlockTimeStampOffset 200
+ err = handler.GetBlockTimeStampOffset(c)
+ require.NoError(t, err)
+ require.Equal(t, 200, rec.Code)
+ c, rec = newReq(t)
+
+ // TestSetBlockTimeStampOffset 400
+ err = handler.SetBlockTimeStampOffset(c, math.MaxUint64)
+ require.NoError(t, err)
+ require.Equal(t, 400, rec.Code)
+ require.Equal(t, "{\"message\":\"failed to set timestamp offset on the node: block timestamp offset cannot be larger than max int64 value\"}\n", rec.Body.String())
+}
+
+func TestDeltasForTxnGroup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ blk1 := bookkeeping.BlockHeader{Round: 1}
+ blk2 := bookkeeping.BlockHeader{Round: 2}
+ delta1 := ledgercore.StateDelta{Hdr: &blk1}
+ delta2 := ledgercore.StateDelta{Hdr: &blk2, KvMods: map[string]ledgercore.KvValueDelta{"bx1": {Data: []byte("foobar")}}}
+ txn1 := transactions.SignedTxnWithAD{SignedTxn: transactions.SignedTxn{Txn: transactions.Transaction{Type: protocol.PaymentTx}}}
+ groupID1, err := crypto.DigestFromString(crypto.Hash([]byte("hello")).String())
+ require.NoError(t, err)
+ txn2 := transactions.SignedTxnWithAD{SignedTxn: transactions.SignedTxn{Txn: transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: transactions.Header{Group: groupID1}},
+ }}
+
+ tracer := eval.MakeTxnGroupDeltaTracer(2)
+ handlers := v2.Handlers{
+ Node: &mockNode{
+ ledger: &mockLedger{
+ tracer: tracer,
+ },
+ },
+ Log: logging.Base(),
+ }
+ e := echo.New()
+ req := httptest.NewRequest(http.MethodGet, "/", nil)
+ rec := httptest.NewRecorder()
+ c := e.NewContext(req, rec)
+ // Add blocks to tracer
+ tracer.BeforeBlock(&blk1)
+ tracer.AfterTxnGroup(&logic.EvalParams{TxnGroup: []transactions.SignedTxnWithAD{txn1}}, &delta1, nil)
+ tracer.BeforeBlock(&blk2)
+ tracer.AfterTxnGroup(&logic.EvalParams{TxnGroup: []transactions.SignedTxnWithAD{txn2}}, &delta2, nil)
+
+ // Test /v2/deltas/{round}/txn/group
+ jsonFormatForRound := model.GetTransactionGroupLedgerStateDeltasForRoundParamsFormatJson
+ err = handlers.GetTransactionGroupLedgerStateDeltasForRound(
+ c,
+ uint64(1),
+ model.GetTransactionGroupLedgerStateDeltasForRoundParams{Format: &jsonFormatForRound},
+ )
+ require.NoError(t, err)
+
+ var roundResponse model.TransactionGroupLedgerStateDeltasForRoundResponse
+ err = json.Unmarshal(rec.Body.Bytes(), &roundResponse)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(roundResponse.Deltas))
+ require.Equal(t, []string{txn1.ID().String()}, roundResponse.Deltas[0].Ids)
+ hdr, ok := roundResponse.Deltas[0].Delta["Hdr"].(map[string]interface{})
+ require.True(t, ok)
+ require.Equal(t, delta1.Hdr.Round, basics.Round(hdr["rnd"].(float64)))
+
+ // Test invalid round parameter
+ c, rec = newReq(t)
+ err = handlers.GetTransactionGroupLedgerStateDeltasForRound(
+ c,
+ uint64(4),
+ model.GetTransactionGroupLedgerStateDeltasForRoundParams{Format: &jsonFormatForRound},
+ )
+ require.NoError(t, err)
+ require.Equal(t, 404, rec.Code)
+
+ // Test /v2/deltas/txn/group/{id}
+ jsonFormatForTxn := model.GetLedgerStateDeltaForTransactionGroupParamsFormatJson
+ c, rec = newReq(t)
+ // Use TxID
+ err = handlers.GetLedgerStateDeltaForTransactionGroup(
+ c,
+ txn2.Txn.ID().String(),
+ model.GetLedgerStateDeltaForTransactionGroupParams{Format: &jsonFormatForTxn},
+ )
+ require.NoError(t, err)
+ var groupResponse model.LedgerStateDeltaForTransactionGroupResponse
+ err = json.Unmarshal(rec.Body.Bytes(), &groupResponse)
+ require.NoError(t, err)
+ groupHdr, ok := groupResponse["Hdr"].(map[string]interface{})
+ require.True(t, ok)
+ require.Equal(t, delta2.Hdr.Round, basics.Round(groupHdr["rnd"].(float64)))
+
+ // Use Group ID
+ c, rec = newReq(t)
+ err = handlers.GetLedgerStateDeltaForTransactionGroup(
+ c,
+ groupID1.String(),
+ model.GetLedgerStateDeltaForTransactionGroupParams{Format: &jsonFormatForTxn},
+ )
+ require.NoError(t, err)
+ err = json.Unmarshal(rec.Body.Bytes(), &groupResponse)
+ require.NoError(t, err)
+ require.NotNil(t, groupResponse["KvMods"])
+ groupHdr, ok = groupResponse["Hdr"].(map[string]interface{})
+ require.True(t, ok)
+ require.Equal(t, delta2.Hdr.Round, basics.Round(groupHdr["rnd"].(float64)))
+
+ // Test invalid ID
+ c, rec = newReq(t)
+ badID := crypto.Hash([]byte("invalidID")).String()
+ err = handlers.GetLedgerStateDeltaForTransactionGroup(
+ c,
+ badID,
+ model.GetLedgerStateDeltaForTransactionGroupParams{Format: &jsonFormatForTxn},
+ )
+ require.NoError(t, err)
+ require.Equal(t, 404, rec.Code)
+
+ // Test nil Tracer
+ nilTracerHandler := v2.Handlers{
+ Node: &mockNode{
+ ledger: &mockLedger{
+ tracer: nil,
+ },
+ },
+ Log: logging.Base(),
+ }
+ c, rec = newReq(t)
+ err = nilTracerHandler.GetLedgerStateDeltaForTransactionGroup(
+ c,
+ groupID1.String(),
+ model.GetLedgerStateDeltaForTransactionGroupParams{Format: &jsonFormatForTxn},
+ )
+ require.NoError(t, err)
+ require.Equal(t, 501, rec.Code)
+
+ c, rec = newReq(t)
+ err = nilTracerHandler.GetTransactionGroupLedgerStateDeltasForRound(
+ c,
+ 0,
+ model.GetTransactionGroupLedgerStateDeltasForRoundParams{Format: &jsonFormatForRound},
+ )
+ require.NoError(t, err)
+ require.Equal(t, 501, rec.Code)
+}
diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go
index 8b3ac5f8f..bcc1e15d8 100644
--- a/daemon/algod/api/server/v2/test/helpers.go
+++ b/daemon/algod/api/server/v2/test/helpers.go
@@ -64,30 +64,6 @@ var cannedStatusReportGolden = node.StatusReport{
LastCatchpoint: "",
}
-var cannedStatusReportConsensusUpgradeGolden = node.StatusReport{
- LastRound: basics.Round(97000),
- LastVersion: protocol.ConsensusCurrentVersion,
- NextVersion: protocol.ConsensusCurrentVersion,
- NextVersionRound: 200000,
- NextVersionSupported: true,
- StoppedAtUnsupportedRound: true,
- Catchpoint: "",
- CatchpointCatchupAcquiredBlocks: 0,
- CatchpointCatchupProcessedAccounts: 0,
- CatchpointCatchupVerifiedAccounts: 0,
- CatchpointCatchupTotalAccounts: 0,
- CatchpointCatchupTotalKVs: 0,
- CatchpointCatchupProcessedKVs: 0,
- CatchpointCatchupVerifiedKVs: 0,
- CatchpointCatchupTotalBlocks: 0,
- LastCatchpoint: "",
- UpgradePropose: "upgradePropose",
- UpgradeApprove: false,
- UpgradeDelay: 0,
- NextProtocolVoteBefore: 100000,
- NextProtocolApprovals: 5000,
-}
-
var poolAddrRewardBaseGolden = uint64(0)
var poolAddrAssetsGolden = make([]model.AssetHolding, 0)
var poolAddrCreatedAssetsGolden = make([]model.Asset, 0)
@@ -114,14 +90,16 @@ var txnPoolGolden = make([]transactions.SignedTxn, 2)
// package `data` and package `node`, which themselves import `mocks`
type mockNode struct {
mock.Mock
- ledger v2.LedgerForAPI
- genesisID string
- config config.Local
- err error
- id account.ParticipationID
- keys account.StateProofKeys
- usertxns map[basics.Address][]node.TxnWithStatus
- consensusUpgrade bool
+ ledger v2.LedgerForAPI
+ genesisID string
+ config config.Local
+ err error
+ id account.ParticipationID
+ keys account.StateProofKeys
+ usertxns map[basics.Address][]node.TxnWithStatus
+ status node.StatusReport
+ devmode bool
+ timestampOffset *int64
}
func (m *mockNode) InstallParticipationKey(partKeyBinary []byte) (account.ParticipationID, error) {
@@ -159,28 +137,23 @@ func (m *mockNode) AppendParticipationKeys(id account.ParticipationID, keys acco
return m.err
}
-func makeMockNode(ledger v2.LedgerForAPI, genesisID string, nodeError error, consensusUpgrade bool) *mockNode {
+func makeMockNode(ledger v2.LedgerForAPI, genesisID string, nodeError error, status node.StatusReport, devMode bool) *mockNode {
return &mockNode{
- ledger: ledger,
- genesisID: genesisID,
- config: config.GetDefaultLocal(),
- err: nodeError,
- usertxns: map[basics.Address][]node.TxnWithStatus{},
- consensusUpgrade: consensusUpgrade,
+ ledger: ledger,
+ genesisID: genesisID,
+ config: config.GetDefaultLocal(),
+ err: nodeError,
+ usertxns: map[basics.Address][]node.TxnWithStatus{},
+ status: status,
+ devmode: devMode,
}
}
func (m *mockNode) LedgerForAPI() v2.LedgerForAPI {
return m.ledger
}
-
-func (m mockNode) Status() (s node.StatusReport, err error) {
- if m.consensusUpgrade {
- s = cannedStatusReportConsensusUpgradeGolden
- } else {
- s = cannedStatusReportGolden
- }
- return
+func (m *mockNode) Status() (node.StatusReport, error) {
+ return m.status, nil
}
func (m *mockNode) GenesisID() string {
return m.genesisID
@@ -194,9 +167,9 @@ func (m *mockNode) BroadcastSignedTxGroup(txgroup []transactions.SignedTxn) erro
return m.err
}
-func (m *mockNode) Simulate(txgroup []transactions.SignedTxn) (simulation.Result, error) {
+func (m *mockNode) Simulate(request simulation.Request) (simulation.Result, error) {
simulator := simulation.MakeSimulator(m.ledger.(*data.Ledger))
- return simulator.Simulate(txgroup)
+ return simulator.Simulate(request)
}
func (m *mockNode) GetPendingTransaction(txID transactions.Txid) (res node.TxnWithStatus, found bool) {
@@ -268,6 +241,23 @@ func (m *mockNode) AbortCatchup(catchpoint string) error {
return m.err
}
+func (m *mockNode) SetBlockTimeStampOffset(offset int64) error {
+ if !m.devmode {
+ return fmt.Errorf("cannot set block timestamp when not in dev mode")
+ }
+ m.timestampOffset = &offset
+ return nil
+}
+
+func (m *mockNode) GetBlockTimeStampOffset() (*int64, error) {
+ if !m.devmode {
+ return nil, fmt.Errorf("cannot get block timestamp when not in dev mode")
+ } else if m.timestampOffset == nil {
+ return nil, nil
+ }
+ return m.timestampOffset, nil
+}
+
////// mock ledger testing environment follows
var sinkAddr = basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
@@ -276,7 +266,7 @@ var genesisHash = crypto.Digest{0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
var genesisID = "testingid"
var retOneProgram = []byte{2, 0x20, 1, 1, 0x22}
-var proto = config.Consensus[protocol.ConsensusCurrentVersion]
+var proto = config.Consensus[protocol.ConsensusFuture]
func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*data.Ledger, []account.Root, []account.Participation, []transactions.SignedTxn, func()) {
minMoneyAtStart := 100000 // min money start
@@ -323,7 +313,7 @@ func testingenvWithBalances(t testing.TB, minMoneyAtStart, maxMoneyAtStart, numA
}
accessors = append(accessors, access)
- part, err := account.FillDBWithParticipationKeys(access, root.Address(), 0, lastValid, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
+ part, err := account.FillDBWithParticipationKeys(access, root.Address(), 0, lastValid, proto.DefaultKeyDilution)
if err != nil {
panic(err)
}
@@ -361,7 +351,7 @@ func testingenvWithBalances(t testing.TB, minMoneyAtStart, maxMoneyAtStart, numA
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
- ledger, err := data.LoadLedger(logging.Base(), t.Name(), inMem, protocol.ConsensusCurrentVersion, bootstrap, genesisID, genesisHash, nil, cfg)
+ ledger, err := data.LoadLedger(logging.Base(), t.Name(), inMem, protocol.ConsensusFuture, bootstrap, genesisID, genesisHash, nil, cfg)
if err != nil {
panic(err)
}
diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go
index b2b4e4a35..0aa4c9c94 100644
--- a/daemon/algod/api/server/v2/utils.go
+++ b/daemon/algod/api/server/v2/utils.go
@@ -62,6 +62,10 @@ func notFound(ctx echo.Context, internal error, external string, log logging.Log
return returnError(ctx, http.StatusNotFound, internal, external, log)
}
+func notImplemented(ctx echo.Context, internal error, external string, log logging.Logger) error {
+ return returnError(ctx, http.StatusNotImplemented, internal, external, log)
+}
+
func addrOrNil(addr basics.Address) *string {
if addr.IsZero() {
return nil
@@ -272,26 +276,29 @@ func stateDeltaToStateDelta(d basics.StateDelta) *model.StateDelta {
return &delta
}
+func edIndexToAddress(index uint64, txn *transactions.Transaction, shared []basics.Address) string {
+ // index into [Sender, txn.Accounts[0], txn.Accounts[1], ..., shared[0], shared[1], ...]
+ switch {
+ case index == 0:
+ return txn.Sender.String()
+ case int(index-1) < len(txn.Accounts):
+ return txn.Accounts[index-1].String()
+ case int(index-1)-len(txn.Accounts) < len(shared):
+ return shared[int(index-1)-len(txn.Accounts)].String()
+ default:
+ return fmt.Sprintf("Invalid Account Index %d in LocalDelta", index)
+ }
+}
+
func convertToDeltas(txn node.TxnWithStatus) (*[]model.AccountStateDelta, *model.StateDelta) {
var localStateDelta *[]model.AccountStateDelta
if len(txn.ApplyData.EvalDelta.LocalDeltas) > 0 {
d := make([]model.AccountStateDelta, 0)
- accounts := txn.Txn.Txn.Accounts
+ shared := txn.ApplyData.EvalDelta.SharedAccts
for k, v := range txn.ApplyData.EvalDelta.LocalDeltas {
- // Resolve address from index
- var addr string
- if k == 0 {
- addr = txn.Txn.Txn.Sender.String()
- } else {
- if int(k-1) < len(accounts) {
- addr = txn.Txn.Txn.Accounts[k-1].String()
- } else {
- addr = fmt.Sprintf("Invalid Address Index: %d", k-1)
- }
- }
d = append(d, model.AccountStateDelta{
- Address: addr,
+ Address: edIndexToAddress(k, &txn.Txn.Txn, shared),
Delta: *(stateDeltaToStateDelta(v)),
})
}
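The lookup order implemented by `edIndexToAddress` (sender first, then `txn.Accounts`, then the shared accounts recorded in the `EvalDelta`) can be illustrated with a minimal standalone sketch; the string placeholders are hypothetical stand-ins for `basics.Address`:

```go
package main

import "fmt"

// resolve mirrors the switch in edIndexToAddress, using plain strings
// in place of basics.Address: index 0 is the sender, indexes
// 1..len(accounts) reach into txn.Accounts, and anything past that
// reaches into the shared accounts recorded in the EvalDelta.
func resolve(index int, sender string, accounts, shared []string) string {
	switch {
	case index == 0:
		return sender
	case index-1 < len(accounts):
		return accounts[index-1]
	case index-1-len(accounts) < len(shared):
		return shared[index-1-len(accounts)]
	default:
		return fmt.Sprintf("Invalid Account Index %d in LocalDelta", index)
	}
}

func main() {
	accounts := []string{"ACCT1"} // stand-in for txn.Accounts
	shared := []string{"SHARED1"} // stand-in for EvalDelta.SharedAccts
	for i := 0; i <= 3; i++ {
		fmt.Println(i, "->", resolve(i, "SENDER", accounts, shared))
	}
}
```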
@@ -353,22 +360,25 @@ func ConvertInnerTxn(txn *transactions.SignedTxnWithAD) PreEncodedTxInfo {
return response
}
-func convertTxnResult(txnResult simulation.TxnResult) preEncodedSimulateTxnResult {
- return preEncodedSimulateTxnResult{
- Txn: ConvertInnerTxn(&txnResult.Txn),
- MissingSignature: trueOrNil(txnResult.MissingSignature),
+func convertTxnResult(txnResult simulation.TxnResult) PreEncodedSimulateTxnResult {
+ return PreEncodedSimulateTxnResult{
+ Txn: ConvertInnerTxn(&txnResult.Txn),
+ AppBudgetConsumed: numOrNil(txnResult.AppBudgetConsumed),
+ LogicSigBudgetConsumed: numOrNil(txnResult.LogicSigBudgetConsumed),
}
}
-func convertTxnGroupResult(txnGroupResult simulation.TxnGroupResult) preEncodedSimulateTxnGroupResult {
- txnResults := make([]preEncodedSimulateTxnResult, len(txnGroupResult.Txns))
+func convertTxnGroupResult(txnGroupResult simulation.TxnGroupResult) PreEncodedSimulateTxnGroupResult {
+ txnResults := make([]PreEncodedSimulateTxnResult, len(txnGroupResult.Txns))
for i, txnResult := range txnGroupResult.Txns {
txnResults[i] = convertTxnResult(txnResult)
}
- encoded := preEncodedSimulateTxnGroupResult{
- Txns: txnResults,
- FailureMessage: strOrNil(txnGroupResult.FailureMessage),
+ encoded := PreEncodedSimulateTxnGroupResult{
+ Txns: txnResults,
+ FailureMessage: strOrNil(txnGroupResult.FailureMessage),
+ AppBudgetAdded: numOrNil(txnGroupResult.AppBudgetAdded),
+ AppBudgetConsumed: numOrNil(txnGroupResult.AppBudgetConsumed),
}
if len(txnGroupResult.FailedAt) > 0 {
@@ -380,12 +390,22 @@ func convertTxnGroupResult(txnGroupResult simulation.TxnGroupResult) preEncodedS
return encoded
}
-func convertSimulationResult(result simulation.Result) preEncodedSimulateResponse {
- encodedSimulationResult := preEncodedSimulateResponse{
- Version: result.Version,
- LastRound: uint64(result.LastRound),
- WouldSucceed: result.WouldSucceed,
- TxnGroups: make([]preEncodedSimulateTxnGroupResult, len(result.TxnGroups)),
+func convertSimulationResult(result simulation.Result) PreEncodedSimulateResponse {
+ var evalOverrides *model.SimulationEvalOverrides
+ if result.EvalOverrides != (simulation.ResultEvalOverrides{}) {
+ evalOverrides = &model.SimulationEvalOverrides{
+ AllowEmptySignatures: trueOrNil(result.EvalOverrides.AllowEmptySignatures),
+ MaxLogSize: result.EvalOverrides.MaxLogSize,
+ MaxLogCalls: result.EvalOverrides.MaxLogCalls,
+ ExtraOpcodeBudget: numOrNil(result.EvalOverrides.ExtraOpcodeBudget),
+ }
+ }
+
+ encodedSimulationResult := PreEncodedSimulateResponse{
+ Version: result.Version,
+ LastRound: uint64(result.LastRound),
+ TxnGroups: make([]PreEncodedSimulateTxnGroupResult, len(result.TxnGroups)),
+ EvalOverrides: evalOverrides,
}
for i, txnGroup := range result.TxnGroups {
@@ -395,6 +415,19 @@ func convertSimulationResult(result simulation.Result) preEncodedSimulateRespons
return encodedSimulationResult
}
+func convertSimulationRequest(request PreEncodedSimulateRequest) simulation.Request {
+ txnGroups := make([][]transactions.SignedTxn, len(request.TxnGroups))
+ for i, txnGroup := range request.TxnGroups {
+ txnGroups[i] = txnGroup.Txns
+ }
+ return simulation.Request{
+ TxnGroups: txnGroups,
+ AllowEmptySignatures: request.AllowEmptySignatures,
+ AllowMoreLogging: request.AllowMoreLogging,
+ ExtraOpcodeBudget: request.ExtraOpcodeBudget,
+ }
+}
+
// printableUTF8OrEmpty checks to see if the entire string is a UTF8 printable string.
// If this is the case, the string is returned as is. Otherwise, the empty string is returned.
func printableUTF8OrEmpty(in string) string {
diff --git a/daemon/algod/server.go b/daemon/algod/server.go
index e116f206b..010d3aec5 100644
--- a/daemon/algod/server.go
+++ b/daemon/algod/server.go
@@ -83,7 +83,6 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes
liveLog := filepath.Join(s.RootPath, "node.log")
archive := filepath.Join(s.RootPath, cfg.LogArchiveName)
- fmt.Println("Logging to: ", liveLog)
var maxLogAge time.Duration
var err error
if cfg.LogArchiveMaxAge != "" {
@@ -96,8 +95,10 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes
var logWriter io.Writer
if cfg.LogSizeLimit > 0 {
+ fmt.Println("Logging to: ", liveLog)
logWriter = logging.MakeCyclicFileWriter(liveLog, archive, cfg.LogSizeLimit, maxLogAge)
} else {
+ fmt.Println("Logging to: stdout")
logWriter = os.Stdout
}
s.log.SetOutput(logWriter)
@@ -184,17 +185,17 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes
	// if we have telemetry enabled, we want to use its session ID as part of the
// collected metrics decorations.
- fmt.Fprintln(logWriter, "++++++++++++++++++++++++++++++++++++++++")
- fmt.Fprintln(logWriter, "Logging Starting")
+ s.log.Infoln("++++++++++++++++++++++++++++++++++++++++")
+ s.log.Infoln("Logging Starting")
if s.log.GetTelemetryUploadingEnabled() {
// May or may not be logging to node.log
- fmt.Fprintf(logWriter, "Telemetry Enabled: %s\n", s.log.GetTelemetryGUID())
- fmt.Fprintf(logWriter, "Session: %s\n", s.log.GetTelemetrySession())
+ s.log.Infof("Telemetry Enabled: %s\n", s.log.GetTelemetryGUID())
+ s.log.Infof("Session: %s\n", s.log.GetTelemetrySession())
} else {
// May or may not be logging to node.log
- fmt.Fprintln(logWriter, "Telemetry Disabled")
+ s.log.Infoln("Telemetry Disabled")
}
- fmt.Fprintln(logWriter, "++++++++++++++++++++++++++++++++++++++++")
+ s.log.Infoln("++++++++++++++++++++++++++++++++++++++++")
metricLabels := map[string]string{}
if s.log.GetTelemetryEnabled() {
diff --git a/data/account/participationRegistry.go b/data/account/participationRegistry.go
index 1381ddbaf..bc9d0b301 100644
--- a/data/account/participationRegistry.go
+++ b/data/account/participationRegistry.go
@@ -232,7 +232,7 @@ type ParticipationRegistry interface {
// once, an error will occur when the data is flushed when inserting a duplicate key.
AppendKeys(id ParticipationID, keys StateProofKeys) error
- // DeleteStateProofKeys removes all stateproof keys preceding a given round (including)
+ // DeleteStateProofKeys removes all stateproof keys up to, but not including, a given round
DeleteStateProofKeys(id ParticipationID, round basics.Round) error
// Delete removes a record from storage.
@@ -347,7 +347,7 @@ const (
insertKeysetQuery = `INSERT INTO Keysets (participationID, account, firstValidRound, lastValidRound, keyDilution, vrf, stateProof) VALUES (?, ?, ?, ?, ?, ?, ?)`
insertRollingQuery = `INSERT INTO Rolling (pk, voting) VALUES (?, ?)`
appendStateProofKeysQuery = `INSERT INTO StateProofKeys (pk, round, key) VALUES(?, ?, ?)`
- deleteStateProofKeysQuery = `DELETE FROM StateProofKeys WHERE pk=? AND round<=?`
+ deleteStateProofKeysQuery = `DELETE FROM StateProofKeys WHERE pk=? AND round<?`
// SELECT pk FROM Keysets WHERE participationID = ?
selectPK = `SELECT pk FROM Keysets WHERE participationID = ? LIMIT 1`
diff --git a/data/account/participation_test.go b/data/account/participation_test.go
index b9a417a8f..4a933d72b 100644
--- a/data/account/participation_test.go
+++ b/data/account/participation_test.go
@@ -344,7 +344,7 @@ func setupTestDBAtVer2(partDB db.Accessor, part Participation) error {
keyDilution INTEGER NOT NULL DEFAULT 0
);`)
if err != nil {
- return nil
+ return err
}
if err := setupSchemaForTest(tx, 2); err != nil {
@@ -362,12 +362,12 @@ func setupTestDBAtVer2(partDB db.Accessor, part Participation) error {
func setupSchemaForTest(tx *sql.Tx, version int) error {
_, err := tx.Exec(`CREATE TABLE schema (tablename TEXT PRIMARY KEY, version INTEGER);`)
if err != nil {
- return nil
+ return err
}
_, err = tx.Exec("INSERT INTO schema (tablename, version) VALUES (?, ?)", PartTableSchemaName, version)
if err != nil {
- return nil
+ return err
}
return err
}
diff --git a/data/accountManager.go b/data/accountManager.go
index 222a45944..063ab4d1b 100644
--- a/data/accountManager.go
+++ b/data/accountManager.go
@@ -45,7 +45,8 @@ type AccountManager struct {
log logging.Logger
}
-// DeleteStateProofKey deletes all keys connected to ParticipationID that came before (including) the given round.
+// DeleteStateProofKey deletes all keys related to a ParticipationID up to,
+// but not including, the given round.
func (manager *AccountManager) DeleteStateProofKey(id account.ParticipationID, round basics.Round) error {
return manager.registry.DeleteStateProofKeys(id, round)
}
@@ -82,7 +83,7 @@ func (manager *AccountManager) StateProofKeys(rnd basics.Round) (out []account.S
if part.StateProof != nil && part.OverlapsInterval(rnd, rnd) {
partRndSecrets, err := manager.registry.GetStateProofSecretsForRound(part.ParticipationID, rnd)
if err != nil {
- manager.log.Errorf("error while loading round secrets from participation registry: %v", err)
+ manager.log.Warnf("could not load state proof keys from participation registry: %v", err)
continue
}
out = append(out, partRndSecrets)
diff --git a/data/accountManager_test.go b/data/accountManager_test.go
index 0be2ec139..3d580a5f2 100644
--- a/data/accountManager_test.go
+++ b/data/accountManager_test.go
@@ -201,16 +201,6 @@ func TestAccountManagerOverlappingStateProofKeys(t *testing.T) {
acctManager := MakeAccountManager(log, registry)
- databaseFiles := make([]string, 0)
- defer func() {
- for _, fileName := range databaseFiles {
- os.Remove(fileName)
- os.Remove(fileName + "-shm")
- os.Remove(fileName + "-wal")
- os.Remove(fileName + "-journal")
- }
- }()
-
// Generate 2 participations under the same account
store, err := db.MakeAccessor("stateprooftest", false, true)
a.NoError(err)
@@ -261,6 +251,53 @@ func TestAccountManagerOverlappingStateProofKeys(t *testing.T) {
a.Equal(1, len(res))
}
+func TestAccountManagerRemoveStateProofKeysForExpiredAccounts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+
+ registry, dbName := getRegistryImpl(t, false, true)
+ defer registryCloseTest(t, registry, dbName)
+
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Error)
+
+ acctManager := MakeAccountManager(log, registry)
+
+ store, err := db.MakeAccessor("stateprooftest", false, true)
+ a.NoError(err)
+ root, err := account.GenerateRoot(store)
+ a.NoError(err)
+ part1, err := account.FillDBWithParticipationKeys(store, root.Address(), 0, basics.Round(merklesignature.KeyLifetimeDefault*2), 3)
+ a.NoError(err)
+ store.Close()
+
+ keys1 := part1.StateProofSecrets.GetAllKeys()
+
+ // Add participations to the registry and append StateProof keys as well
+ part1ID, err := acctManager.registry.Insert(part1.Participation)
+ a.NoError(err)
+ err = registry.AppendKeys(part1ID, keys1)
+ a.NoError(err)
+
+ err = acctManager.registry.Flush(10 * time.Second)
+ a.NoError(err)
+
+ for i := 1; i <= 2; i++ {
+ res := acctManager.StateProofKeys(basics.Round(i * merklesignature.KeyLifetimeDefault))
+ a.Equal(1, len(res))
+ }
+
+ b := bookkeeping.BlockHeader{Round: part1.LastValid + 1}
+ acctManager.DeleteOldKeys(b, config.Consensus[protocol.ConsensusCurrentVersion])
+ err = acctManager.registry.Flush(10 * time.Second)
+ a.NoError(err)
+
+ for i := 1; i <= 2; i++ {
+ res := acctManager.StateProofKeys(basics.Round(i * merklesignature.KeyLifetimeDefault))
+ a.Equal(0, len(res))
+ }
+}
+
func TestGetStateProofKeysDontLogErrorOnNilStateProof(t *testing.T) {
partitiontest.PartitionTest(t)
a := assert.New(t)
@@ -274,16 +311,6 @@ func TestGetStateProofKeysDontLogErrorOnNilStateProof(t *testing.T) {
log.SetOutput(logbuffer)
acctManager := MakeAccountManager(log, registry)
- databaseFiles := make([]string, 0)
- defer func() {
- for _, fileName := range databaseFiles {
- os.Remove(fileName)
- os.Remove(fileName + "-shm")
- os.Remove(fileName + "-wal")
- os.Remove(fileName + "-journal")
- }
- }()
-
// Generate 2 participations under the same account
store, err := db.MakeAccessor("stateprooftest", false, true)
a.NoError(err)
diff --git a/data/basics/units.go b/data/basics/units.go
index de23f533a..8370ceda6 100644
--- a/data/basics/units.go
+++ b/data/basics/units.go
@@ -135,3 +135,8 @@ func (round Round) SubSaturate(x Round) Round {
func (round Round) RoundUpToMultipleOf(n Round) Round {
return (round + n - 1) / n * n
}
+
+// RoundDownToMultipleOf rounds down round to a multiple of n.
+func (round Round) RoundDownToMultipleOf(n Round) Round {
+ return (round / n) * n
+}
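As a quick sanity check on the arithmetic, a minimal sketch with plain `uint64`s rather than the `Round` type itself:

```go
package main

import "fmt"

// roundDown mirrors Round.RoundDownToMultipleOf: integer division
// truncates, so (round/n)*n is the largest multiple of n that does
// not exceed round.
func roundDown(round, n uint64) uint64 {
	return (round / n) * n
}

func main() {
	fmt.Println(roundDown(24, 10)) // 20
	fmt.Println(roundDown(24, 24)) // 24
	fmt.Println(roundDown(7, 10))  // 0
}
```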
diff --git a/data/basics/units_test.go b/data/basics/units_test.go
index b5c698e46..2359bc1c6 100644
--- a/data/basics/units_test.go
+++ b/data/basics/units_test.go
@@ -58,6 +58,7 @@ func TestAddSaturate32(t *testing.T) {
func TestRoundUpToMultipleOf(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
r := Round(24)
for n := Round(1); n < Round(100); n++ {
@@ -71,6 +72,24 @@ func TestRoundUpToMultipleOf(t *testing.T) {
}
}
+func TestRoundDownToMultipleOf(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ a := require.New(t)
+
+ r := Round(24)
+ for n := Round(1); n < Round(100); n++ {
+ mul := r.RoundDownToMultipleOf(n)
+ a.True(mul <= r)
+ a.Equal(Round(0), mul%n)
+ if r < n {
+ a.Equal(Round(0), mul)
+ } else if r == n {
+ a.Equal(n, mul)
+ }
+ }
+}
+
func OldMuldiv(a uint64, b uint64, c uint64) (res uint64, overflow bool) {
var aa big.Int
aa.SetUint64(a)
diff --git a/data/bookkeeping/genesis.go b/data/bookkeeping/genesis.go
index a81103f28..d750b0eaf 100644
--- a/data/bookkeeping/genesis.go
+++ b/data/bookkeeping/genesis.go
@@ -223,6 +223,12 @@ func MakeGenesisBlock(proto protocol.ConsensusVersion, genesisBal GenesisBalance
},
}
+ // If a new network is being created in which the AVM can't access low-numbered
+ // resources, bump the TxnCounter so there won't be any such resources.
+ if params.AppForbidLowResources {
+ blk.TxnCounter = 1000
+ }
+
if params.SupportGenesisHash {
blk.BlockHeader.GenesisHash = genesisHash
}
diff --git a/data/datatest/impls.go b/data/datatest/impls.go
index 960b12e37..fefcb054d 100644
--- a/data/datatest/impls.go
+++ b/data/datatest/impls.go
@@ -107,8 +107,8 @@ func (i ledgerImpl) LookupAgreement(r basics.Round, addr basics.Address) (basics
}
// Circulation implements Ledger.Circulation.
-func (i ledgerImpl) Circulation(r basics.Round) (basics.MicroAlgos, error) {
- return i.l.Circulation(r)
+func (i ledgerImpl) Circulation(r basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) {
+ return i.l.Circulation(r, voteRnd)
}
// Wait implements Ledger.Wait.
diff --git a/data/ledger.go b/data/ledger.go
index 04e532061..27949168f 100644
--- a/data/ledger.go
+++ b/data/ledger.go
@@ -51,16 +51,17 @@ type Ledger struct {
lastRoundSeed atomic.Value
}
-// roundCirculationPair used to hold a pair of matching round number and the amount of online money
-type roundCirculationPair struct {
+// roundCirculationItem holds a matching round number, vote round, and the amount of online money
+type roundCirculationItem struct {
round basics.Round
+ voteRound basics.Round
onlineMoney basics.MicroAlgos
}
// roundCirculation is the cache for the circulating coins
type roundCirculation struct {
// elements holds several round-onlineMoney pairs
- elements [2]roundCirculationPair
+ elements [2]roundCirculationItem
}
// roundSeedPair is the cache for a single seed at a given round
@@ -174,28 +175,29 @@ func (l *Ledger) NextRound() basics.Round {
}
// Circulation implements agreement.Ledger.Circulation.
-func (l *Ledger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
+func (l *Ledger) Circulation(r basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) {
circulation, cached := l.lastRoundCirculation.Load().(roundCirculation)
if cached && r != basics.Round(0) {
for _, element := range circulation.elements {
- if element.round == r {
+ if element.round == r && element.voteRound == voteRnd {
return element.onlineMoney, nil
}
}
}
- totals, err := l.OnlineTotals(r)
+ totals, err := l.OnlineCirculation(r, voteRnd)
if err != nil {
return basics.MicroAlgos{}, err
}
- if !cached || r > circulation.elements[1].round {
+ if !cached || r > circulation.elements[1].round || voteRnd > circulation.elements[1].voteRound {
l.lastRoundCirculation.Store(
roundCirculation{
- elements: [2]roundCirculationPair{
+ elements: [2]roundCirculationItem{
circulation.elements[1],
{
round: r,
+ voteRound: voteRnd,
onlineMoney: totals},
},
})
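The cache above is a lock-free, two-slot structure: readers `Load` an immutable snapshot, and writers `Store` a fresh array that shifts the newer slot down. A minimal standalone sketch of the pattern, assuming `uint64` stand-ins for `basics.Round` and `basics.MicroAlgos`:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type entry struct {
	round, voteRound uint64
	onlineMoney      uint64
}

// cache keeps the two most recent (round, voteRound) -> onlineMoney
// results. Readers Load a snapshot; writers Store a new two-element
// array, discarding the older slot.
type cache struct{ v atomic.Value }

func (c *cache) get(r, vr uint64) (uint64, bool) {
	snap, ok := c.v.Load().([2]entry)
	if !ok {
		return 0, false
	}
	for _, e := range snap {
		if e.round == r && e.voteRound == vr {
			return e.onlineMoney, true
		}
	}
	return 0, false
}

func (c *cache) put(r, vr, money uint64) {
	snap, _ := c.v.Load().([2]entry) // zero value if never stored
	c.v.Store([2]entry{snap[1], {round: r, voteRound: vr, onlineMoney: money}})
}

func main() {
	var c cache
	c.put(100, 420, 7_000_000)
	if m, ok := c.get(100, 420); ok {
		fmt.Println(m) // 7000000
	}
}
```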
diff --git a/data/ledger_test.go b/data/ledger_test.go
index 540dfea1d..20a082839 100644
--- a/data/ledger_test.go
+++ b/data/ledger_test.go
@@ -120,7 +120,8 @@ func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (gene
func TestLedgerCirculation(t *testing.T) {
partitiontest.PartitionTest(t)
- genesisInitState, keys := testGenerateInitState(t, protocol.ConsensusCurrentVersion)
+ proto := protocol.ConsensusCurrentVersion
+ genesisInitState, keys := testGenerateInitState(t, proto)
const inMem = true
cfg := config.GetDefaultLocal()
@@ -171,6 +172,8 @@ func TestLedgerCirculation(t *testing.T) {
srcAccountKey := keys[sourceAccount]
require.NotNil(t, srcAccountKey)
+ params := config.Consensus[proto]
+
for rnd := basics.Round(1); rnd < basics.Round(600); rnd++ {
blk.BlockHeader.Round++
blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
@@ -191,6 +194,8 @@ func TestLedgerCirculation(t *testing.T) {
require.NoError(t, l.AddBlock(blk, agreement.Certificate{}))
l.WaitForCommit(rnd)
+ var voteRoundOffset = basics.Round(2 * params.SeedRefreshInterval * params.SeedLookback)
+
// test most recent round
if rnd < basics.Round(500) {
data, validThrough, _, err = realLedger.LookupAccount(rnd, destAccount)
@@ -202,11 +207,11 @@ func TestLedgerCirculation(t *testing.T) {
require.Equal(t, rnd, validThrough)
require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgos.Raw)
- roundCirculation, err := realLedger.OnlineTotals(rnd)
+ roundCirculation, err := realLedger.OnlineCirculation(rnd, rnd+voteRoundOffset)
require.NoError(t, err)
require.Equal(t, baseCirculation-uint64(rnd)*(10001), roundCirculation.Raw)
- roundCirculation, err = l.OnlineTotals(rnd)
+ roundCirculation, err = l.OnlineCirculation(rnd, rnd+voteRoundOffset)
require.NoError(t, err)
require.Equal(t, baseCirculation-uint64(rnd)*(10001), roundCirculation.Raw)
} else if rnd < basics.Round(510) {
@@ -220,11 +225,11 @@ func TestLedgerCirculation(t *testing.T) {
require.Equal(t, rnd-1, validThrough)
require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgos.Raw)
- roundCirculation, err := realLedger.OnlineTotals(rnd - 1)
+ roundCirculation, err := realLedger.OnlineCirculation(rnd-1, rnd-1+voteRoundOffset)
require.NoError(t, err)
require.Equal(t, baseCirculation-uint64(rnd-1)*(10001), roundCirculation.Raw)
- roundCirculation, err = l.OnlineTotals(rnd - 1)
+ roundCirculation, err = l.OnlineCirculation(rnd-1, rnd-1+voteRoundOffset)
require.NoError(t, err)
require.Equal(t, baseCirculation-uint64(rnd-1)*(10001), roundCirculation.Raw)
} else if rnd < basics.Round(520) {
@@ -236,17 +241,17 @@ func TestLedgerCirculation(t *testing.T) {
require.Error(t, err)
require.Equal(t, uint64(0), data.MicroAlgos.Raw)
- _, err = realLedger.OnlineTotals(rnd + 1)
+ _, err = realLedger.OnlineCirculation(rnd+1, rnd+1+voteRoundOffset)
require.Error(t, err)
- _, err = l.OnlineTotals(rnd + 1)
+ _, err = l.OnlineCirculation(rnd+1, rnd+1+voteRoundOffset)
require.Error(t, err)
} else if rnd < basics.Round(520) {
// test expired round ( expected error )
- _, err = realLedger.OnlineTotals(rnd - 500)
+ _, err = realLedger.OnlineCirculation(rnd-500, rnd-500+voteRoundOffset)
require.Error(t, err)
- _, err = l.OnlineTotals(rnd - 500)
+ _, err = l.OnlineCirculation(rnd-500, rnd-500+voteRoundOffset)
require.Error(t, err)
}
}
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index bc0da751f..a03baea4f 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -700,7 +700,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
if hint < 0 || int(knownCommitted) < 0 {
hint = 0
}
- pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, hint, 0)
+ pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, hint, 0, nil)
if err != nil {
// The pendingBlockEvaluator is an interface, and in case of an evaluator error
		// we want to remove the interface itself rather than keeping an interface
@@ -805,21 +805,13 @@ func (pool *TransactionPool) getStateProofStats(txib *transactions.SignedTxnInBl
}
lastSPRound := basics.Round(txib.Txn.StateProofTxnFields.Message.LastAttestedRound)
- lastRoundHdr, err := pool.ledger.BlockHdr(lastSPRound)
+ verificationCtx, err := pool.ledger.GetStateProofVerificationContext(lastSPRound)
if err != nil {
return stateProofStats
}
- proto := config.Consensus[lastRoundHdr.CurrentProtocol]
- votersRound := lastSPRound.SubSaturate(basics.Round(proto.StateProofInterval))
- votersRoundHdr, err := pool.ledger.BlockHdr(votersRound)
- if err != nil {
- return stateProofStats
- }
-
- totalWeight := votersRoundHdr.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.Raw
- stateProofStats.ProvenWeight, _ = basics.Muldiv(totalWeight, uint64(proto.StateProofWeightThreshold), 1<<32)
-
+ totalWeight := verificationCtx.OnlineTotalWeight.Raw
+ stateProofStats.ProvenWeight, _ = basics.Muldiv(totalWeight, uint64(config.Consensus[verificationCtx.Version].StateProofWeightThreshold), 1<<32)
return stateProofStats
}
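For intuition on the `Muldiv` call: `StateProofWeightThreshold` is a fraction encoded in 32 fractional bits, so the proven weight is `totalWeight * threshold / 2^32`. A minimal sketch with a hypothetical threshold of roughly 68%; the overflow reporting that `basics.Muldiv` provides is omitted:

```go
package main

import (
	"fmt"
	"math/big"
)

// provenWeight mirrors the Muldiv call: the threshold carries 32
// fractional bits, so the result is totalWeight * threshold / 2^32,
// computed via big.Int to avoid intermediate overflow.
func provenWeight(totalWeight, threshold uint64) uint64 {
	t := new(big.Int).SetUint64(totalWeight)
	t.Mul(t, new(big.Int).SetUint64(threshold))
	t.Rsh(t, 32)
	return t.Uint64()
}

func main() {
	// hypothetical threshold of ~0.68 * 2^32
	fmt.Println(provenWeight(10_000_000, 2920577761)) // 6799999
}
```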
@@ -976,7 +968,7 @@ func (pool *TransactionPool) assembleEmptyBlock(round basics.Round) (assembled *
return nil, err
}
next := bookkeeping.MakeBlock(prev)
- blockEval, err := pool.ledger.StartEvaluator(next.BlockHeader, 0, 0)
+ blockEval, err := pool.ledger.StartEvaluator(next.BlockHeader, 0, 0, nil)
if err != nil {
var nonSeqBlockEval ledgercore.ErrNonSequentialBlockEval
if errors.As(err, &nonSeqBlockEval) {
diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go
index 02239d50f..b8adcff0a 100644
--- a/data/pools/transactionPool_test.go
+++ b/data/pools/transactionPool_test.go
@@ -119,7 +119,7 @@ func newBlockEvaluator(t TestingT, l *ledger.Ledger) BlockEvaluator {
require.NoError(t, err)
next := bookkeeping.MakeBlock(prev)
- eval, err := l.StartEvaluator(next.BlockHeader, 0, 0)
+ eval, err := l.StartEvaluator(next.BlockHeader, 0, 0, nil)
require.NoError(t, err)
return eval
@@ -1453,7 +1453,7 @@ func TestStateProofLogging(t *testing.T) {
require.NoError(t, err)
b.BlockHeader.Branch = phdr.Hash()
- _, err = mockLedger.StartEvaluator(b.BlockHeader, 0, 10000)
+ _, err = mockLedger.StartEvaluator(b.BlockHeader, 0, 10000, nil)
require.NoError(t, err)
// Simulate the blocks up to round 512 without any transactions
@@ -1477,7 +1477,7 @@ func TestStateProofLogging(t *testing.T) {
break
}
- _, err = mockLedger.StartEvaluator(b.BlockHeader, 0, 10000)
+ _, err = mockLedger.StartEvaluator(b.BlockHeader, 0, 10000, nil)
require.NoError(t, err)
}
@@ -1499,7 +1499,7 @@ func TestStateProofLogging(t *testing.T) {
require.NotNil(t, voters)
// Get the message
- msg, err := stateproof.GenerateStateProofMessage(mockLedger, uint64(votersRound), spRoundHdr)
+ msg, err := stateproof.GenerateStateProofMessage(mockLedger, round)
// Get the SP
proof := generateProofForTesting(uint64(round), msg, provenWeight, voters.Participants, voters.Tree, allKeys, t)
@@ -1520,7 +1520,7 @@ func TestStateProofLogging(t *testing.T) {
require.NoError(t, err)
// Add it to the transaction pool and assemble the block
- eval, err := mockLedger.StartEvaluator(b.BlockHeader, 0, 1000000)
+ eval, err := mockLedger.StartEvaluator(b.BlockHeader, 0, 1000000, nil)
require.NoError(t, err)
err = eval.Transaction(stxn, transactions.ApplyData{})
@@ -1586,7 +1586,7 @@ func generateProofForTesting(
// Prepare the builder
stateProofStrengthTargetForTests := config.Consensus[protocol.ConsensusCurrentVersion].StateProofStrengthTarget
- b, err := cryptostateproof.MakeBuilder(data, round, provenWeight,
+ b, err := cryptostateproof.MakeProver(data, round, provenWeight,
partArray, partTree, stateProofStrengthTargetForTests)
require.NoError(t, err)
@@ -1607,7 +1607,7 @@ func generateProofForTesting(
}
// Build the SP
- proof, err := b.Build()
+ proof, err := b.CreateProof()
require.NoError(t, err)
return proof
diff --git a/data/transactions/application.go b/data/transactions/application.go
index 6ae70ed65..c303093c8 100644
--- a/data/transactions/application.go
+++ b/data/transactions/application.go
@@ -230,8 +230,7 @@ func (ac *ApplicationCallTxnFields) AddressByIndex(accountIdx uint64, sender bas
// An index > 0 corresponds to an offset into txn.Accounts. Check to
// make sure the index is valid.
if accountIdx > uint64(len(ac.Accounts)) {
- err := fmt.Errorf("invalid Account reference %d", accountIdx)
- return basics.Address{}, err
+ return basics.Address{}, fmt.Errorf("invalid Account reference %d", accountIdx)
}
// accountIdx must be in [1, len(ac.Accounts)]
@@ -256,45 +255,3 @@ func (ac *ApplicationCallTxnFields) IndexByAddress(target basics.Address, sender
return 0, fmt.Errorf("invalid Account reference %s", target)
}
-
-// AppIDByIndex converts an integer index into an application id associated with the
-// transaction. Index 0 corresponds to the current app, and an index > 0
-// corresponds to an offset into txn.ForeignApps. Returns an error if the index is
-// not valid.
-func (ac *ApplicationCallTxnFields) AppIDByIndex(i uint64) (basics.AppIndex, error) {
-
- // Index 0 always corresponds to the current app
- if i == 0 {
- return ac.ApplicationID, nil
- }
-
- // An index > 0 corresponds to an offset into txn.ForeignApps. Check to
- // make sure the index is valid.
- if i > uint64(len(ac.ForeignApps)) {
- err := fmt.Errorf("invalid Foreign App reference %d", i)
- return basics.AppIndex(0), err
- }
-
- // aidx must be in [1, len(ac.ForeignApps)]
- return ac.ForeignApps[i-1], nil
-}
-
-// IndexByAppID converts an application id into an integer offset into [current app,
-// txn.ForeignApps[0], ...], returning the index at the first match. It returns
-// an error if there is no such match.
-func (ac *ApplicationCallTxnFields) IndexByAppID(appID basics.AppIndex) (uint64, error) {
-
- // Index 0 always corresponds to the current app
- if appID == ac.ApplicationID {
- return 0, nil
- }
-
- // Otherwise we index into ac.ForeignApps
- for i, id := range ac.ForeignApps {
- if appID == id {
- return uint64(i) + 1, nil
- }
- }
-
- return 0, fmt.Errorf("invalid Foreign App reference %d", appID)
-}
diff --git a/data/transactions/application_test.go b/data/transactions/application_test.go
index 24bff87a0..a0a839b13 100644
--- a/data/transactions/application_test.go
+++ b/data/transactions/application_test.go
@@ -124,30 +124,3 @@ func TestEncodedAppTxnAllocationBounds(t *testing.T) {
}
}
}
-
-func TestIDByIndex(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- ac := ApplicationCallTxnFields{}
- ac.ApplicationID = 1
- appID, err := ac.AppIDByIndex(0)
- a.NoError(err)
- a.Equal(basics.AppIndex(1), appID)
- appID, err = ac.AppIDByIndex(1)
- a.Contains(err.Error(), "invalid Foreign App reference")
-
-}
-
-func TestIndexByID(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- a := require.New(t)
- ac := ApplicationCallTxnFields{}
- ac.ApplicationID = 1
- aidx, err := ac.IndexByAppID(1)
- a.NoError(err)
- a.Equal(uint64(0), aidx)
- aidx, err = ac.IndexByAppID(2)
- a.Contains(err.Error(), "invalid Foreign App reference")
-}
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index 324cd7d53..7c51a9c23 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -40,6 +40,32 @@ has fewer than two elements, the operation fails. Some operations, like
`frame_dig` and `proto`, could fail because of an attempt to access
above the current stack.
+## Stack Types
+
+While every element of the stack is restricted to the types `uint64` and `bytes`,
+the values of these types may be known to be bounded. The more common bounded types are
+named to provide more semantic information in the documentation. They're also used at
+assembly time for type checking and to produce more informative error messages.
+
+
+#### Definitions
+
+| Name | Bound | AVM Type |
+| ---- | ---- | -------- |
+| uint64 | x <= 18446744073709551615 | uint64 |
+| stateKey | len(x) <= 64 | []byte |
+| none | | none |
+| method | len(x) == 4 | []byte |
+| boxName | 1 <= len(x) <= 64 | []byte |
+| bool | x <= 1 | uint64 |
+| bigint | len(x) <= 64 | []byte |
+| any | | any |
+| address | len(x) == 32 | []byte |
+| []byte | len(x) <= 4096 | []byte |
+| [32]byte | len(x) == 32 | []byte |
+
+
+
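One plausible way to read the table: each named type is a base AVM type plus an inclusive bound on the value or byte length. A hypothetical Go encoding, purely illustrative and not the assembler's actual representation:

```go
package main

import "fmt"

// stackType is a hypothetical encoding of a row in the table above:
// a base AVM type plus an inclusive bound on the byte length (for
// []byte-based types) or on the value (for uint64-based types).
type stackType struct {
	name     string
	base     string
	min, max uint64
}

var (
	address = stackType{name: "address", base: "[]byte", min: 32, max: 32}
	boxName = stackType{name: "boxName", base: "[]byte", min: 1, max: 64}
)

// fits reports whether a byte-string of length n satisfies the bound.
func (st stackType) fits(n uint64) bool { return st.min <= n && n <= st.max }

func main() {
	fmt.Println(address.fits(32)) // true
	fmt.Println(boxName.fits(0))  // false: box names are 1..64 bytes
}
```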
## Scratch Space
In addition to the stack there are 256 positions of scratch
@@ -193,6 +219,41 @@ _available_.
* Since v7, the account associated with any contract present in the
`txn.ForeignApplications` field is _available_.
+
+ * Since v9, there is group-level resource sharing. Any resource that
+ is available in _some_ top-level transaction in a transaction group
+ is available in _all_ v9 or later application calls in the group,
+ whether those application calls are top-level or inner.
+
+ * When considering whether an asset holding or application local
+ state is available by group-level resource sharing, the holding or
+ local state must be available in a top-level transaction without
+ considering group sharing. For example, if account A is made
+ available in one transaction, and asset X is made available in
+ another, group resource sharing does _not_ make A's X holding
+ available.
+
+ * Top-level transactions that are not application calls also make
+ resources available to group-level resource sharing. The following
+ resources are made available by other transaction types.
+
+ 1. `pay` - `txn.Sender`, `txn.Receiver`, and
+ `txn.CloseRemainderTo` (if set).
+
+ 1. `keyreg` - `txn.Sender`
+
+ 1. `acfg` - `txn.Sender`, `txn.ConfigAsset`, and the
+ `txn.ConfigAsset` holding of `txn.Sender`.
+
+ 1. `axfer` - `txn.Sender`, `txn.AssetReceiver`, `txn.AssetSender`
+ (if set), `txn.AssetCloseTo` (if set), `txn.XferAsset`, and the
+ `txn.XferAsset` holding of each of those accounts.
+
+ 1. `afrz` - `txn.Sender`, `txn.FreezeAccount`, `txn.FreezeAsset`,
+ and the `txn.FreezeAsset` holding of `txn.FreezeAccount`. The
+ `txn.FreezeAsset` holding of `txn.Sender` is _not_ made
+ available.
+
* A Box is _available_ to an Approval Program if _any_ transaction in
the same group contains a box reference (`txn.Boxes`) that denotes
@@ -202,6 +263,18 @@ _available_.
indicates the application ID of the app called by that
transaction. No box is ever _available_ to a ClearStateProgram.
+Regardless of _availability_, any attempt to access an Asset or
+Application with an ID less than 256 from within a Contract will fail
+immediately. This avoids any ambiguity in opcodes that interpret their
+integer arguments as resource IDs _or_ indexes into the
+`txn.ForeignAssets` or `txn.ForeignApplications` arrays.
+
+It is recommended that contract authors avoid supplying array indexes
+to these opcodes, and always use explicit resource IDs. By using
+explicit IDs, contracts will better take advantage of group resource
+sharing. The array indexing interpretation may be deprecated in a
+future version.
+
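The ambiguity being removed can be sketched with a simplified, hypothetical resolver; the real opcode semantics live in the `logic` package, and the error text here is invented:

```go
package main

import "fmt"

// resolveApp shows the ambiguity: a small integer argument could mean
// an index into the foreign-apps array or a literal app ID. Forbidding
// direct IDs below 256 lets small values be read unambiguously as
// array indexes.
func resolveApp(arg uint64, foreignApps []uint64) (uint64, error) {
	if arg < uint64(len(foreignApps)) {
		return foreignApps[arg], nil // index interpretation
	}
	if arg < 256 {
		return 0, fmt.Errorf("unavailable App %d", arg) // invented error text
	}
	return arg, nil // explicit ID interpretation
}

func main() {
	apps := []uint64{5000, 6000}
	fmt.Println(resolveApp(1, apps))    // 6000 <nil>
	fmt.Println(resolveApp(123, apps))  // 0 unavailable App 123
	fmt.Println(resolveApp(7000, apps)) // 7000 <nil>
}
```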
## Constants
Constants can be pushed onto the stack in two different ways:
@@ -444,18 +517,18 @@ Some of these have immediate data in the byte or bytes after the opcode.
##### Scalar Fields
| Index | Name | Type | In | Notes |
| - | ------ | -- | - | --------- |
-| 0 | Sender | []byte | | 32 byte address |
+| 0 | Sender | address | | 32 byte address |
| 1 | Fee | uint64 | | microalgos |
| 2 | FirstValid | uint64 | | round number |
| 3 | FirstValidTime | uint64 | v7 | UNIX timestamp of block before txn.FirstValid. Fails if negative |
| 4 | LastValid | uint64 | | round number |
| 5 | Note | []byte | | Any data up to 1024 bytes |
-| 6 | Lease | []byte | | 32 byte lease value |
-| 7 | Receiver | []byte | | 32 byte address |
+| 6 | Lease | [32]byte | | 32 byte lease value |
+| 7 | Receiver | address | | 32 byte address |
| 8 | Amount | uint64 | | microalgos |
-| 9 | CloseRemainderTo | []byte | | 32 byte address |
-| 10 | VotePK | []byte | | 32 byte address |
-| 11 | SelectionPK | []byte | | 32 byte address |
+| 9 | CloseRemainderTo | address | | 32 byte address |
+| 10 | VotePK | [32]byte | | 32 byte address |
+| 11 | SelectionPK | [32]byte | | 32 byte address |
| 12 | VoteFirst | uint64 | | The first round that the participation key is valid. |
| 13 | VoteLast | uint64 | | The last round that the participation key is valid. |
| 14 | VoteKeyDilution | uint64 | | Dilution for the 2-level participation key |
@@ -463,33 +536,33 @@ Some of these have immediate data in the byte or bytes after the opcode.
| 16 | TypeEnum | uint64 | | Transaction type as integer |
| 17 | XferAsset | uint64 | | Asset ID |
| 18 | AssetAmount | uint64 | | value in Asset's units |
-| 19 | AssetSender | []byte | | 32 byte address. Source of assets if Sender is the Asset's Clawback address. |
-| 20 | AssetReceiver | []byte | | 32 byte address |
-| 21 | AssetCloseTo | []byte | | 32 byte address |
+| 19 | AssetSender | address | | 32 byte address. Source of assets if Sender is the Asset's Clawback address. |
+| 20 | AssetReceiver | address | | 32 byte address |
+| 21 | AssetCloseTo | address | | 32 byte address |
| 22 | GroupIndex | uint64 | | Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1 |
-| 23 | TxID | []byte | | The computed ID for this transaction. 32 bytes. |
+| 23 | TxID | [32]byte | | The computed ID for this transaction. 32 bytes. |
| 24 | ApplicationID | uint64 | v2 | ApplicationID from ApplicationCall transaction |
| 25 | OnCompletion | uint64 | v2 | ApplicationCall transaction on completion action |
| 27 | NumAppArgs | uint64 | v2 | Number of ApplicationArgs |
| 29 | NumAccounts | uint64 | v2 | Number of Accounts |
| 30 | ApprovalProgram | []byte | v2 | Approval program |
| 31 | ClearStateProgram | []byte | v2 | Clear state program |
-| 32 | RekeyTo | []byte | v2 | 32 byte Sender's new AuthAddr |
+| 32 | RekeyTo | address | v2 | 32 byte Sender's new AuthAddr |
| 33 | ConfigAsset | uint64 | v2 | Asset ID in asset config transaction |
| 34 | ConfigAssetTotal | uint64 | v2 | Total number of units of this asset created |
| 35 | ConfigAssetDecimals | uint64 | v2 | Number of digits to display after the decimal place when displaying the asset |
-| 36 | ConfigAssetDefaultFrozen | uint64 | v2 | Whether the asset's slots are frozen by default or not, 0 or 1 |
+| 36 | ConfigAssetDefaultFrozen | bool | v2 | Whether the asset's slots are frozen by default or not, 0 or 1 |
| 37 | ConfigAssetUnitName | []byte | v2 | Unit name of the asset |
| 38 | ConfigAssetName | []byte | v2 | The asset name |
| 39 | ConfigAssetURL | []byte | v2 | URL |
-| 40 | ConfigAssetMetadataHash | []byte | v2 | 32 byte commitment to unspecified asset metadata |
-| 41 | ConfigAssetManager | []byte | v2 | 32 byte address |
-| 42 | ConfigAssetReserve | []byte | v2 | 32 byte address |
-| 43 | ConfigAssetFreeze | []byte | v2 | 32 byte address |
-| 44 | ConfigAssetClawback | []byte | v2 | 32 byte address |
+| 40 | ConfigAssetMetadataHash | [32]byte | v2 | 32 byte commitment to unspecified asset metadata |
+| 41 | ConfigAssetManager | address | v2 | 32 byte address |
+| 42 | ConfigAssetReserve | address | v2 | 32 byte address |
+| 43 | ConfigAssetFreeze | address | v2 | 32 byte address |
+| 44 | ConfigAssetClawback | address | v2 | 32 byte address |
| 45 | FreezeAsset | uint64 | v2 | Asset ID being frozen or un-frozen |
-| 46 | FreezeAssetAccount | []byte | v2 | 32 byte address of the account whose asset slot is being frozen or un-frozen |
-| 47 | FreezeAssetFrozen | uint64 | v2 | The new frozen value, 0 or 1 |
+| 46 | FreezeAssetAccount | address | v2 | 32 byte address of the account whose asset slot is being frozen or un-frozen |
+| 47 | FreezeAssetFrozen | bool | v2 | The new frozen value, 0 or 1 |
| 49 | NumAssets | uint64 | v3 | Number of Assets |
| 51 | NumApplications | uint64 | v3 | Number of Applications |
| 52 | GlobalNumUint | uint64 | v3 | Number of global state integers in ApplicationCall |
@@ -497,7 +570,7 @@ Some of these have immediate data in the byte or bytes after the opcode.
| 54 | LocalNumUint | uint64 | v3 | Number of local state integers in ApplicationCall |
| 55 | LocalNumByteSlice | uint64 | v3 | Number of local state byteslices in ApplicationCall |
| 56 | ExtraProgramPages | uint64 | v4 | Number of additional pages for each of the application's approval and clear state programs. An ExtraProgramPages of 1 means 2048 more total bytes, or 1024 for each program. |
-| 57 | Nonparticipation | uint64 | v5 | Marks an account nonparticipating for rewards |
+| 57 | Nonparticipation | bool | v5 | Marks an account nonparticipating for rewards |
| 59 | NumLogs | uint64 | v5 | Number of Logs (only with `itxn` in v5). Application mode only |
| 60 | CreatedAssetID | uint64 | v5 | Asset ID allocated by the creation of an ASA (only with `itxn` in v5). Application mode only |
| 61 | CreatedApplicationID | uint64 | v5 | ApplicationID allocated by the creation of an application (only with `itxn` in v5). Application mode only |
@@ -510,7 +583,7 @@ Some of these have immediate data in the byte or bytes after the opcode.
| Index | Name | Type | In | Notes |
| - | ------ | -- | - | --------- |
| 26 | ApplicationArgs | []byte | v2 | Arguments passed to the application in the ApplicationCall transaction |
-| 28 | Accounts | []byte | v2 | Accounts listed in the ApplicationCall transaction |
+| 28 | Accounts | address | v2 | Accounts listed in the ApplicationCall transaction |
| 48 | Assets | uint64 | v3 | Foreign Assets listed in the ApplicationCall transaction |
| 50 | Applications | uint64 | v3 | Foreign Apps listed in the ApplicationCall transaction |
| 58 | Logs | []byte | v5 | Log messages emitted by an application call (only with `itxn` in v5). Application mode only |
@@ -529,18 +602,18 @@ Global fields are fields that are common to all the transactions in the group. I
| 0 | MinTxnFee | uint64 | | microalgos |
| 1 | MinBalance | uint64 | | microalgos |
| 2 | MaxTxnLife | uint64 | | rounds |
-| 3 | ZeroAddress | []byte | | 32 byte address of all zero bytes |
+| 3 | ZeroAddress | address | | 32 byte address of all zero bytes |
| 4 | GroupSize | uint64 | | Number of transactions in this atomic transaction group. At least 1 |
| 5 | LogicSigVersion | uint64 | v2 | Maximum supported version |
| 6 | Round | uint64 | v2 | Current round number. Application mode only. |
| 7 | LatestTimestamp | uint64 | v2 | Last confirmed block UNIX timestamp. Fails if negative. Application mode only. |
| 8 | CurrentApplicationID | uint64 | v2 | ID of current application executing. Application mode only. |
-| 9 | CreatorAddress | []byte | v3 | Address of the creator of the current application. Application mode only. |
-| 10 | CurrentApplicationAddress | []byte | v5 | Address that the current application controls. Application mode only. |
-| 11 | GroupID | []byte | v5 | ID of the transaction group. 32 zero bytes if the transaction is not part of a group. |
+| 9 | CreatorAddress | address | v3 | Address of the creator of the current application. Application mode only. |
+| 10 | CurrentApplicationAddress | address | v5 | Address that the current application controls. Application mode only. |
+| 11 | GroupID | [32]byte | v5 | ID of the transaction group. 32 zero bytes if the transaction is not part of a group. |
| 12 | OpcodeBudget | uint64 | v6 | The remaining cost that can be spent by opcodes in this program. |
| 13 | CallerApplicationID | uint64 | v6 | The application ID of the application that called this application. 0 if this application is at the top-level. Application mode only. |
-| 14 | CallerApplicationAddress | []byte | v6 | The application address of the application that called this application. ZeroAddress if this application is at the top-level. Application mode only. |
+| 14 | CallerApplicationAddress | address | v6 | The application address of the application that called this application. ZeroAddress if this application is at the top-level. Application mode only. |
**Asset Fields**
@@ -550,23 +623,23 @@ Asset fields include `AssetHolding` and `AssetParam` fields that are used in the
| Index | Name | Type | Notes |
| - | ------ | -- | --------- |
| 0 | AssetBalance | uint64 | Amount of the asset unit held by this account |
-| 1 | AssetFrozen | uint64 | Is the asset frozen or not |
+| 1 | AssetFrozen | bool | Is the asset frozen or not |
| Index | Name | Type | In | Notes |
| - | ------ | -- | - | --------- |
| 0 | AssetTotal | uint64 | | Total number of units of this asset |
| 1 | AssetDecimals | uint64 | | See AssetParams.Decimals |
-| 2 | AssetDefaultFrozen | uint64 | | Frozen by default or not |
+| 2 | AssetDefaultFrozen | bool | | Frozen by default or not |
| 3 | AssetUnitName | []byte | | Asset unit name |
| 4 | AssetName | []byte | | Asset name |
| 5 | AssetURL | []byte | | URL with additional info about the asset |
-| 6 | AssetMetadataHash | []byte | | Arbitrary commitment |
-| 7 | AssetManager | []byte | | Manager address |
-| 8 | AssetReserve | []byte | | Reserve address |
-| 9 | AssetFreeze | []byte | | Freeze address |
-| 10 | AssetClawback | []byte | | Clawback address |
-| 11 | AssetCreator | []byte | v5 | Creator address |
+| 6 | AssetMetadataHash | [32]byte | | Arbitrary commitment |
+| 7 | AssetManager | address | | Manager address |
+| 8 | AssetReserve | address | | Reserve address |
+| 9 | AssetFreeze | address | | Freeze address |
+| 10 | AssetClawback | address | | Clawback address |
+| 11 | AssetCreator | address | v5 | Creator address |
**App Fields**
@@ -582,8 +655,8 @@ App fields used in the `app_params_get` opcode.
| 4 | AppLocalNumUint | uint64 | Number of uint64 values allowed in Local State |
| 5 | AppLocalNumByteSlice | uint64 | Number of byte array values allowed in Local State |
| 6 | AppExtraProgramPages | uint64 | Number of Extra Program Pages of code space |
-| 7 | AppCreator | []byte | Creator address |
-| 8 | AppAddress | []byte | Address for which this application has authority |
+| 7 | AppCreator | address | Creator address |
+| 8 | AppAddress | address | Address for which this application has authority |
**Account Fields**
@@ -594,7 +667,7 @@ Account fields used in the `acct_params_get` opcode.
| - | ------ | -- | - | --------- |
| 0 | AcctBalance | uint64 | | Account balance in microalgos |
| 1 | AcctMinBalance | uint64 | | Minimum required balance for account, in microalgos |
-| 2 | AcctAuthAddr | []byte | | Address the account is rekeyed to. |
+| 2 | AcctAuthAddr | address | | Address the account is rekeyed to. |
| 3 | AcctTotalNumUint | uint64 | v8 | The total number of uint64 values allocated by this account in Global and Local States. |
| 4 | AcctTotalNumByteSlice | uint64 | v8 | The total number of byte array values allocated by this account in Global and Local States. |
| 5 | AcctTotalExtraAppPages | uint64 | v8 | The number of extra app code pages used by this account. |
diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md
index 58652ebcf..e0cc32c33 100644
--- a/data/transactions/logic/README_in.md
+++ b/data/transactions/logic/README_in.md
@@ -40,6 +40,17 @@ has fewer than two elements, the operation fails. Some operations, like
`frame_dig` and `proto`, could fail because of an attempt to access
above the current stack.
+## Stack Types
+
+While every element of the stack is restricted to the types `uint64` and `bytes`,
+the values of these types may be known to be bounded. The more common bounded types are
+named to provide more semantic information in the documentation. They're also used at
+assembly time for type checking and to produce more informative error messages.
+
+
+@@ named_stack_types.md @@
+
+
## Scratch Space
In addition to the stack there are 256 positions of scratch
@@ -193,6 +204,41 @@ _available_.
* Since v7, the account associated with any contract present in the
`txn.ForeignApplications` field is _available_.
+
+ * Since v9, there is group-level resource sharing. Any resource that
+ is available in _some_ top-level transaction in a transaction group
+ is available in _all_ v9 or later application calls in the group,
+ whether those application calls are top-level or inner.
+
+ * When considering whether an asset holding or application local
+ state is available by group-level resource sharing, the holding or
+ local state must be available in a top-level transaction without
+ considering group sharing. For example, if account A is made
+ available in one transaction, and asset X is made available in
+ another, group resource sharing does _not_ make A's X holding
+ available.
+
+ * Top-level transactions that are not application calls also contribute
+   resources to group-level resource sharing. The following resources are
+   made available by each of the other transaction types.
+
+ 1. `pay` - `txn.Sender`, `txn.Receiver`, and
+ `txn.CloseRemainderTo` (if set).
+
+ 1. `keyreg` - `txn.Sender`
+
+ 1. `acfg` - `txn.Sender`, `txn.ConfigAsset`, and the
+ `txn.ConfigAsset` holding of `txn.Sender`.
+
+ 1. `axfer` - `txn.Sender`, `txn.AssetReceiver`, `txn.AssetSender`
+      (if set), `txn.AssetCloseTo` (if set), `txn.XferAsset`, and the
+ `txn.XferAsset` holding of each of those accounts.
+
+ 1. `afrz` - `txn.Sender`, `txn.FreezeAccount`, `txn.FreezeAsset`,
+ and the `txn.FreezeAsset` holding of `txn.FreezeAccount`. The
+ `txn.FreezeAsset` holding of `txn.Sender` is _not_ made
+ available.
+
* A Box is _available_ to an Approval Program if _any_ transaction in
the same group contains a box reference (`txn.Boxes`) that denotes
@@ -202,6 +248,18 @@ _available_.
indicates the application ID of the app called by that
transaction. No box is ever _available_ to a ClearStateProgram.
+Regardless of _availability_, any attempt to access an Asset or
+Application with an ID less than 256 from within a Contract will fail
+immediately. This avoids any ambiguity in opcodes that interpret their
+integer arguments as resource IDs _or_ indexes into the
+`txn.ForeignAssets` or `txn.ForeignApplications` arrays.
+
+It is recommended that contract authors avoid supplying array indexes
+to these opcodes, and always use explicit resource IDs. By using
+explicit IDs, contracts will better take advantage of group resource
+sharing. The array indexing interpretation may be deprecated in a
+future version.
+
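+A sketch of the recommended style, assuming the asset ID used here is
+hypothetical and has been made _available_ to the group:
+
+```
+#pragma version 9
+int 1234567                  // explicit (hypothetical) asset ID, not an array index
+asset_params_get AssetTotal
+assert                       // fail unless the asset exists
+pop                          // this sketch only checks existence
+int 1
+return
+```
+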
## Constants
Constants can be pushed onto the stack in two different ways:
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index 54a80db8f..5a30ce35c 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -5,14 +5,14 @@ Ops have a 'cost' of 1 unless otherwise specified.
## err
-- Opcode: 0x00
+- Bytecode: 0x00
- Stack: ... &rarr; _exits_
- Fail immediately.
## sha256
-- Opcode: 0x01
-- Stack: ..., A: []byte &rarr; ..., []byte
+- Bytecode: 0x01
+- Stack: ..., A: []byte &rarr; ..., [32]byte
- SHA256 hash of value A, yields [32]byte
- **Cost**:
- 7 (v1)
@@ -20,8 +20,8 @@ Ops have a 'cost' of 1 unless otherwise specified.
## keccak256
-- Opcode: 0x02
-- Stack: ..., A: []byte &rarr; ..., []byte
+- Bytecode: 0x02
+- Stack: ..., A: []byte &rarr; ..., [32]byte
- Keccak256 hash of value A, yields [32]byte
- **Cost**:
- 26 (v1)
@@ -29,8 +29,8 @@ Ops have a 'cost' of 1 unless otherwise specified.
## sha512_256
-- Opcode: 0x03
-- Stack: ..., A: []byte &rarr; ..., []byte
+- Bytecode: 0x03
+- Stack: ..., A: []byte &rarr; ..., [32]byte
- SHA512_256 hash of value A, yields [32]byte
- **Cost**:
- 9 (v1)
@@ -38,22 +38,25 @@ Ops have a 'cost' of 1 unless otherwise specified.
## ed25519verify
-- Opcode: 0x04
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., uint64
+- Bytecode: 0x04
+- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1}
- **Cost**: 1900
The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.
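A LogicSig-mode sketch of that ordering; the three arguments are placeholders supplied alongside the signed transaction:

```
arg 0          // the data that was signed
arg 1          // 64-byte signature over ("ProgData" || program_hash || data)
arg 2          // 32-byte public key, pushed last (top of stack)
ed25519verify  // 1 if the signature verifies, else 0
return
```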
-## ecdsa_verify v
+## ecdsa_verify
-- Opcode: 0x05 {uint8 curve index}
-- Stack: ..., A: []byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., uint64
+- Syntax: `ecdsa_verify V` ∋ V: [ECDSA](#field-group-ecdsa)
+- Bytecode: 0x05 {uint8}
+- Stack: ..., A: []byte, B: []byte, C: []byte, D: []byte, E: []byte &rarr; ..., bool
- for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: Secp256k1=1700 Secp256r1=2500
- Availability: v5
-`ECDSA` Curves:
+### ECDSA
+
+Curves
| Index | Name | In | Notes |
| - | ------ | - | --------- |
@@ -63,9 +66,10 @@ The 32 byte public key is the last element on the stack, preceded by the 64 byte
The 32 byte Y-component of a public key is the last element on the stack, preceded by the X-component of the pubkey, preceded by the S and R components of a signature, preceded by the data, which is the fifth element on the stack. All values are big-endian encoded. The signed data must be 32 bytes long, and only signatures in lower-S form are accepted.
-## ecdsa_pk_decompress v
+## ecdsa_pk_decompress
-- Opcode: 0x06 {uint8 curve index}
+- Syntax: `ecdsa_pk_decompress V` ∋ V: [ECDSA](#field-group-ecdsa)
+- Bytecode: 0x06 {uint8}
- Stack: ..., A: []byte &rarr; ..., X: []byte, Y: []byte
- decompress pubkey A into components X, Y
- **Cost**: Secp256k1=650 Secp256r1=2400
@@ -73,9 +77,10 @@ The 32 byte Y-component of a public key is the last element on the stack, preced
The 33 byte public key in a compressed form to be decompressed into X and Y (top) components. All values are big-endian encoded.
-## ecdsa_pk_recover v
+## ecdsa_pk_recover
-- Opcode: 0x07 {uint8 curve index}
+- Syntax: `ecdsa_pk_recover V` ∋ V: [ECDSA](#field-group-ecdsa)
+- Bytecode: 0x07 {uint8}
- Stack: ..., A: []byte, B: uint64, C: []byte, D: []byte &rarr; ..., X: []byte, Y: []byte
- for (data A, recovery id B, signature C, D) recover a public key
- **Cost**: 2000
@@ -85,7 +90,7 @@ S (top) and R elements of a signature, recovery id and data (bottom) are expecte
## +
-- Opcode: 0x08
+- Bytecode: 0x08
- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
- A plus B. Fail on overflow.
@@ -93,13 +98,13 @@ Overflow is an error condition which halts execution and fails the transaction.
## -
-- Opcode: 0x09
+- Bytecode: 0x09
- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
- A minus B. Fail if B > A.
## /
-- Opcode: 0x0a
+- Bytecode: 0x0a
- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
- A divided by B (truncated division). Fail if B == 0.
@@ -107,7 +112,7 @@ Overflow is an error condition which halts execution and fails the transaction.
## *
-- Opcode: 0x0b
+- Bytecode: 0x0b
- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
- A times B. Fail on overflow.
@@ -115,73 +120,73 @@ Overflow is an error condition which halts execution and fails the transaction.
## <
-- Opcode: 0x0c
-- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
+- Bytecode: 0x0c
+- Stack: ..., A: uint64, B: uint64 &rarr; ..., bool
- A less than B => {0 or 1}
## >
-- Opcode: 0x0d
-- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
+- Bytecode: 0x0d
+- Stack: ..., A: uint64, B: uint64 &rarr; ..., bool
- A greater than B => {0 or 1}
## <=
-- Opcode: 0x0e
-- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
+- Bytecode: 0x0e
+- Stack: ..., A: uint64, B: uint64 &rarr; ..., bool
- A less than or equal to B => {0 or 1}
## >=
-- Opcode: 0x0f
-- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
+- Bytecode: 0x0f
+- Stack: ..., A: uint64, B: uint64 &rarr; ..., bool
- A greater than or equal to B => {0 or 1}
## &&
-- Opcode: 0x10
-- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
+- Bytecode: 0x10
+- Stack: ..., A: uint64, B: uint64 &rarr; ..., bool
- A is not zero and B is not zero => {0 or 1}
## ||
-- Opcode: 0x11
-- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
+- Bytecode: 0x11
+- Stack: ..., A: uint64, B: uint64 &rarr; ..., bool
- A is not zero or B is not zero => {0 or 1}
## ==
-- Opcode: 0x12
-- Stack: ..., A, B &rarr; ..., uint64
+- Bytecode: 0x12
+- Stack: ..., A, B &rarr; ..., bool
- A is equal to B => {0 or 1}
## !=
-- Opcode: 0x13
-- Stack: ..., A, B &rarr; ..., uint64
+- Bytecode: 0x13
+- Stack: ..., A, B &rarr; ..., bool
- A is not equal to B => {0 or 1}
## !
-- Opcode: 0x14
+- Bytecode: 0x14
- Stack: ..., A: uint64 &rarr; ..., uint64
- A == 0 yields 1; else 0
## len
-- Opcode: 0x15
+- Bytecode: 0x15
- Stack: ..., A: []byte &rarr; ..., uint64
- yields length of byte value A
## itob
-- Opcode: 0x16
+- Bytecode: 0x16
- Stack: ..., A: uint64 &rarr; ..., []byte
- converts uint64 A to big-endian byte array, always of length 8
## btoi
-- Opcode: 0x17
+- Bytecode: 0x17
- Stack: ..., A: []byte &rarr; ..., uint64
- converts big-endian byte array A to uint64. Fails if len(A) > 8. Padded by leading 0s if len(A) < 8.
@@ -189,50 +194,50 @@ Overflow is an error condition which halts execution and fails the transaction.
## %
-- Opcode: 0x18
+- Bytecode: 0x18
- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
- A modulo B. Fail if B == 0.
## |
-- Opcode: 0x19
+- Bytecode: 0x19
- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
- A bitwise-or B
## &
-- Opcode: 0x1a
+- Bytecode: 0x1a
- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
- A bitwise-and B
## ^
-- Opcode: 0x1b
+- Bytecode: 0x1b
- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
- A bitwise-xor B
## ~
-- Opcode: 0x1c
+- Bytecode: 0x1c
- Stack: ..., A: uint64 &rarr; ..., uint64
- bitwise invert value A
## mulw
-- Opcode: 0x1d
+- Bytecode: 0x1d
- Stack: ..., A: uint64, B: uint64 &rarr; ..., X: uint64, Y: uint64
- A times B as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low
## addw
-- Opcode: 0x1e
+- Bytecode: 0x1e
- Stack: ..., A: uint64, B: uint64 &rarr; ..., X: uint64, Y: uint64
- A plus B as a 128-bit result. X is the carry-bit, Y is the low-order 64 bits.
- Availability: v2
## divmodw
-- Opcode: 0x1f
+- Bytecode: 0x1f
- Stack: ..., A: uint64, B: uint64, C: uint64, D: uint64 &rarr; ..., W: uint64, X: uint64, Y: uint64, Z: uint64
- W,X = (A,B / C,D); Y,Z = (A,B modulo C,D)
- **Cost**: 20
@@ -240,139 +245,147 @@ Overflow is an error condition which halts execution and fails the transaction.
The notation J,K indicates that two uint64 values J and K are interpreted as a uint128 value, with J as the high uint64 and K the low.
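For example, a sketch dividing the 128-bit value 2^64 + 5 by 2:

```
int 1    // A: high word of the dividend
int 5    // B: low word, so A,B = 2^64 + 5
int 0    // C: high word of the divisor
int 2    // D: low word, so C,D = 2
divmodw  // W,X = 0,(2^63 + 2) quotient; Y,Z = 0,1 remainder
```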
-## intcblock uint ...
+## intcblock
-- Opcode: 0x20 {varuint count} [{varuint value}, ...]
+- Syntax: `intcblock UINT ...` ∋ UINT ...: a block of int constant values
+- Bytecode: 0x20 {varuint count, [varuint ...]}
- Stack: ... &rarr; ...
- prepare block of uint64 constants for use by intc
`intcblock` loads the following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*`, which push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.
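A small sketch of using the constant block directly (assemblers normally emit these blocks automatically for `int` pseudo-ops):

```
intcblock 0 100 10000  // integer constants at indexes 0, 1, 2
intc_1                 // pushes 100
intc 2                 // pushes 10000
+                      // 10100
```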
-## intc i
+## intc
-- Opcode: 0x21 {uint8 int constant index}
+- Syntax: `intc I` ∋ I: an index in the intcblock
+- Bytecode: 0x21 {uint8}
- Stack: ... &rarr; ..., uint64
- Ith constant from intcblock
## intc_0
-- Opcode: 0x22
+- Bytecode: 0x22
- Stack: ... &rarr; ..., uint64
- constant 0 from intcblock
## intc_1
-- Opcode: 0x23
+- Bytecode: 0x23
- Stack: ... &rarr; ..., uint64
- constant 1 from intcblock
## intc_2
-- Opcode: 0x24
+- Bytecode: 0x24
- Stack: ... &rarr; ..., uint64
- constant 2 from intcblock
## intc_3
-- Opcode: 0x25
+- Bytecode: 0x25
- Stack: ... &rarr; ..., uint64
- constant 3 from intcblock
-## bytecblock bytes ...
+## bytecblock
-- Opcode: 0x26 {varuint count} [({varuint length} bytes), ...]
+- Syntax: `bytecblock BYTES ...` ∋ BYTES ...: a block of byte constant values
+- Bytecode: 0x26 {varuint count, [varuint length, bytes ...]}
- Stack: ... &rarr; ...
- prepare block of byte-array constants for use by bytec
`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*`, which push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the byte constants available to the script.
-## bytec i
+## bytec
-- Opcode: 0x27 {uint8 byte constant index}
+- Syntax: `bytec I` ∋ I: an index in the bytecblock
+- Bytecode: 0x27 {uint8}
- Stack: ... &rarr; ..., []byte
- Ith constant from bytecblock
## bytec_0
-- Opcode: 0x28
+- Bytecode: 0x28
- Stack: ... &rarr; ..., []byte
- constant 0 from bytecblock
## bytec_1
-- Opcode: 0x29
+- Bytecode: 0x29
- Stack: ... &rarr; ..., []byte
- constant 1 from bytecblock
## bytec_2
-- Opcode: 0x2a
+- Bytecode: 0x2a
- Stack: ... &rarr; ..., []byte
- constant 2 from bytecblock
## bytec_3
-- Opcode: 0x2b
+- Bytecode: 0x2b
- Stack: ... &rarr; ..., []byte
- constant 3 from bytecblock
-## arg n
+## arg
-- Opcode: 0x2c {uint8 arg index}
+- Syntax: `arg N` ∋ N: an arg index
+- Bytecode: 0x2c {uint8}
- Stack: ... &rarr; ..., []byte
- Nth LogicSig argument
- Mode: Signature
## arg_0
-- Opcode: 0x2d
+- Bytecode: 0x2d
- Stack: ... &rarr; ..., []byte
- LogicSig argument 0
- Mode: Signature
## arg_1
-- Opcode: 0x2e
+- Bytecode: 0x2e
- Stack: ... &rarr; ..., []byte
- LogicSig argument 1
- Mode: Signature
## arg_2
-- Opcode: 0x2f
+- Bytecode: 0x2f
- Stack: ... &rarr; ..., []byte
- LogicSig argument 2
- Mode: Signature
## arg_3
-- Opcode: 0x30
+- Bytecode: 0x30
- Stack: ... &rarr; ..., []byte
- LogicSig argument 3
- Mode: Signature
-## txn f
+## txn
-- Opcode: 0x31 {uint8 transaction field index}
+- Syntax: `txn F` ∋ F: [txn](#field-group-txn)
+- Bytecode: 0x31 {uint8}
- Stack: ... &rarr; ..., any
- field F of current transaction
-`txn` Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/)):
+### txn
+
+Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/))
| Index | Name | Type | In | Notes |
| - | ------ | -- | - | --------- |
-| 0 | Sender | []byte | | 32 byte address |
+| 0 | Sender | address | | 32 byte address |
| 1 | Fee | uint64 | | microalgos |
| 2 | FirstValid | uint64 | | round number |
| 3 | FirstValidTime | uint64 | v7 | UNIX timestamp of block before txn.FirstValid. Fails if negative |
| 4 | LastValid | uint64 | | round number |
| 5 | Note | []byte | | Any data up to 1024 bytes |
-| 6 | Lease | []byte | | 32 byte lease value |
-| 7 | Receiver | []byte | | 32 byte address |
+| 6 | Lease | [32]byte | | 32 byte lease value |
+| 7 | Receiver | address | | 32 byte address |
| 8 | Amount | uint64 | | microalgos |
-| 9 | CloseRemainderTo | []byte | | 32 byte address |
-| 10 | VotePK | []byte | | 32 byte address |
-| 11 | SelectionPK | []byte | | 32 byte address |
+| 9 | CloseRemainderTo | address | | 32 byte address |
+| 10 | VotePK | [32]byte | | 32 byte address |
+| 11 | SelectionPK | [32]byte | | 32 byte address |
| 12 | VoteFirst | uint64 | | The first round that the participation key is valid. |
| 13 | VoteLast | uint64 | | The last round that the participation key is valid. |
| 14 | VoteKeyDilution | uint64 | | Dilution for the 2-level participation key |
@@ -380,33 +393,33 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 16 | TypeEnum | uint64 | | Transaction type as integer |
| 17 | XferAsset | uint64 | | Asset ID |
| 18 | AssetAmount | uint64 | | value in Asset's units |
-| 19 | AssetSender | []byte | | 32 byte address. Source of assets if Sender is the Asset's Clawback address. |
-| 20 | AssetReceiver | []byte | | 32 byte address |
-| 21 | AssetCloseTo | []byte | | 32 byte address |
+| 19 | AssetSender | address | | 32 byte address. Source of assets if Sender is the Asset's Clawback address. |
+| 20 | AssetReceiver | address | | 32 byte address |
+| 21 | AssetCloseTo | address | | 32 byte address |
| 22 | GroupIndex | uint64 | | Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1 |
-| 23 | TxID | []byte | | The computed ID for this transaction. 32 bytes. |
+| 23 | TxID | [32]byte | | The computed ID for this transaction. 32 bytes. |
| 24 | ApplicationID | uint64 | v2 | ApplicationID from ApplicationCall transaction |
| 25 | OnCompletion | uint64 | v2 | ApplicationCall transaction on completion action |
| 27 | NumAppArgs | uint64 | v2 | Number of ApplicationArgs |
| 29 | NumAccounts | uint64 | v2 | Number of Accounts |
| 30 | ApprovalProgram | []byte | v2 | Approval program |
| 31 | ClearStateProgram | []byte | v2 | Clear state program |
-| 32 | RekeyTo | []byte | v2 | 32 byte Sender's new AuthAddr |
+| 32 | RekeyTo | address | v2 | 32 byte Sender's new AuthAddr |
| 33 | ConfigAsset | uint64 | v2 | Asset ID in asset config transaction |
| 34 | ConfigAssetTotal | uint64 | v2 | Total number of units of this asset created |
| 35 | ConfigAssetDecimals | uint64 | v2 | Number of digits to display after the decimal place when displaying the asset |
-| 36 | ConfigAssetDefaultFrozen | uint64 | v2 | Whether the asset's slots are frozen by default or not, 0 or 1 |
+| 36 | ConfigAssetDefaultFrozen | bool | v2 | Whether the asset's slots are frozen by default or not, 0 or 1 |
| 37 | ConfigAssetUnitName | []byte | v2 | Unit name of the asset |
| 38 | ConfigAssetName | []byte | v2 | The asset name |
| 39 | ConfigAssetURL | []byte | v2 | URL |
-| 40 | ConfigAssetMetadataHash | []byte | v2 | 32 byte commitment to unspecified asset metadata |
-| 41 | ConfigAssetManager | []byte | v2 | 32 byte address |
-| 42 | ConfigAssetReserve | []byte | v2 | 32 byte address |
-| 43 | ConfigAssetFreeze | []byte | v2 | 32 byte address |
-| 44 | ConfigAssetClawback | []byte | v2 | 32 byte address |
+| 40 | ConfigAssetMetadataHash | [32]byte | v2 | 32 byte commitment to unspecified asset metadata |
+| 41 | ConfigAssetManager | address | v2 | 32 byte address |
+| 42 | ConfigAssetReserve | address | v2 | 32 byte address |
+| 43 | ConfigAssetFreeze | address | v2 | 32 byte address |
+| 44 | ConfigAssetClawback | address | v2 | 32 byte address |
| 45 | FreezeAsset | uint64 | v2 | Asset ID being frozen or un-frozen |
-| 46 | FreezeAssetAccount | []byte | v2 | 32 byte address of the account whose asset slot is being frozen or un-frozen |
-| 47 | FreezeAssetFrozen | uint64 | v2 | The new frozen value, 0 or 1 |
+| 46 | FreezeAssetAccount | address | v2 | 32 byte address of the account whose asset slot is being frozen or un-frozen |
+| 47 | FreezeAssetFrozen | bool | v2 | The new frozen value, 0 or 1 |
| 49 | NumAssets | uint64 | v3 | Number of Assets |
| 51 | NumApplications | uint64 | v3 | Number of Applications |
| 52 | GlobalNumUint | uint64 | v3 | Number of global state integers in ApplicationCall |
@@ -414,7 +427,7 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 54 | LocalNumUint | uint64 | v3 | Number of local state integers in ApplicationCall |
| 55 | LocalNumByteSlice | uint64 | v3 | Number of local state byteslices in ApplicationCall |
| 56 | ExtraProgramPages | uint64 | v4 | Number of additional pages for each of the application's approval and clear state programs. An ExtraProgramPages of 1 means 2048 more total bytes, or 1024 for each program. |
-| 57 | Nonparticipation | uint64 | v5 | Marks an account nonparticipating for rewards |
+| 57 | Nonparticipation | bool | v5 | Marks an account nonparticipating for rewards |
| 59 | NumLogs | uint64 | v5 | Number of Logs (only with `itxn` in v5). Application mode only |
| 60 | CreatedAssetID | uint64 | v5 | Asset ID allocated by the creation of an ASA (only with `itxn` in v5). Application mode only |
| 61 | CreatedApplicationID | uint64 | v5 | ApplicationID allocated by the creation of an application (only with `itxn` in v5). Application mode only |
@@ -424,66 +437,75 @@ The notation J,K indicates that two uint64 values J and K are interpreted as a u
| 67 | NumClearStateProgramPages | uint64 | v7 | Number of ClearState Program pages |
-## global f
+## global
-- Opcode: 0x32 {uint8 global field index}
+- Syntax: `global F` ∋ F: [global](#field-group-global)
+- Bytecode: 0x32 {uint8}
- Stack: ... &rarr; ..., any
- global field F
-`global` Fields:
+### global
+
+Fields
| Index | Name | Type | In | Notes |
| - | ------ | -- | - | --------- |
| 0 | MinTxnFee | uint64 | | microalgos |
| 1 | MinBalance | uint64 | | microalgos |
| 2 | MaxTxnLife | uint64 | | rounds |
-| 3 | ZeroAddress | []byte | | 32 byte address of all zero bytes |
+| 3 | ZeroAddress | address | | 32 byte address of all zero bytes |
| 4 | GroupSize | uint64 | | Number of transactions in this atomic transaction group. At least 1 |
| 5 | LogicSigVersion | uint64 | v2 | Maximum supported version |
| 6 | Round | uint64 | v2 | Current round number. Application mode only. |
| 7 | LatestTimestamp | uint64 | v2 | Last confirmed block UNIX timestamp. Fails if negative. Application mode only. |
| 8 | CurrentApplicationID | uint64 | v2 | ID of current application executing. Application mode only. |
-| 9 | CreatorAddress | []byte | v3 | Address of the creator of the current application. Application mode only. |
-| 10 | CurrentApplicationAddress | []byte | v5 | Address that the current application controls. Application mode only. |
-| 11 | GroupID | []byte | v5 | ID of the transaction group. 32 zero bytes if the transaction is not part of a group. |
+| 9 | CreatorAddress | address | v3 | Address of the creator of the current application. Application mode only. |
+| 10 | CurrentApplicationAddress | address | v5 | Address that the current application controls. Application mode only. |
+| 11 | GroupID | [32]byte | v5 | ID of the transaction group. 32 zero bytes if the transaction is not part of a group. |
| 12 | OpcodeBudget | uint64 | v6 | The remaining cost that can be spent by opcodes in this program. |
| 13 | CallerApplicationID | uint64 | v6 | The application ID of the application that called this application. 0 if this application is at the top-level. Application mode only. |
-| 14 | CallerApplicationAddress | []byte | v6 | The application address of the application that called this application. ZeroAddress if this application is at the top-level. Application mode only. |
+| 14 | CallerApplicationAddress | address | v6 | The application address of the application that called this application. ZeroAddress if this application is at the top-level. Application mode only. |
-## gtxn t f
+## gtxn
-- Opcode: 0x33 {uint8 transaction group index} {uint8 transaction field index}
+- Syntax: `gtxn T F` ∋ T: transaction group index, F: [txn](#field-group-txn)
+- Bytecode: 0x33 {uint8}, {uint8}
- Stack: ... &rarr; ..., any
- field F of the Tth transaction in the current group
for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.
-## load i
+## load
-- Opcode: 0x34 {uint8 position in scratch space to load from}
+- Syntax: `load I` ∋ I: position in scratch space to load from
+- Bytecode: 0x34 {uint8}
- Stack: ... &rarr; ..., any
- Ith scratch space value. All scratch spaces are 0 at program start.
-## store i
+## store
-- Opcode: 0x35 {uint8 position in scratch space to store to}
+- Syntax: `store I` ∋ I: position in scratch space to store to
+- Bytecode: 0x35 {uint8}
- Stack: ..., A &rarr; ...
- store A to the Ith scratch space
-## txna f i
+## txna
-- Opcode: 0x36 {uint8 transaction field index} {uint8 transaction field array index}
+- Syntax: `txna F I` ∋ F: [txna](#field-group-txna), I: transaction field array index
+- Bytecode: 0x36 {uint8}, {uint8}
- Stack: ... &rarr; ..., any
- Ith value of the array field F of the current transaction<br />`txna` can be called using `txn` with 2 immediates.
- Availability: v2
-`txna` Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/)):
+### txna
+
+Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/))
| Index | Name | Type | In | Notes |
| - | ------ | -- | - | --------- |
| 26 | ApplicationArgs | []byte | v2 | Arguments passed to the application in the ApplicationCall transaction |
-| 28 | Accounts | []byte | v2 | Accounts listed in the ApplicationCall transaction |
+| 28 | Accounts | address | v2 | Accounts listed in the ApplicationCall transaction |
| 48 | Assets | uint64 | v3 | Foreign Assets listed in the ApplicationCall transaction |
| 50 | Applications | uint64 | v3 | Foreign Apps listed in the ApplicationCall transaction |
| 58 | Logs | []byte | v5 | Log messages emitted by an application call (only with `itxn` in v5). Application mode only |
@@ -491,32 +513,36 @@ for notes on transaction fields available, see `txn`. If this transaction is _i_
| 66 | ClearStateProgramPages | []byte | v7 | ClearState Program as an array of pages |
-## gtxna t f i
+## gtxna
-- Opcode: 0x37 {uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}
+- Syntax: `gtxna T F I` ∋ T: transaction group index, F: [txna](#field-group-txna), I: transaction field array index
+- Bytecode: 0x37 {uint8}, {uint8}, {uint8}
- Stack: ... &rarr; ..., any
- Ith value of the array field F from the Tth transaction in the current group<br />`gtxna` can be called using `gtxn` with 3 immediates.
- Availability: v2
-## gtxns f
+## gtxns
-- Opcode: 0x38 {uint8 transaction field index}
+- Syntax: `gtxns F` ∋ F: [txn](#field-group-txn)
+- Bytecode: 0x38 {uint8}
- Stack: ..., A: uint64 &rarr; ..., any
- field F of the Ath transaction in the current group
- Availability: v3
for notes on transaction fields available, see `txn`. If the top of the stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. `gtxns` exists so that _i_ can be calculated, often based on the index of the current transaction.
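A sketch of the common pattern of addressing the preceding transaction in the group:

```
txn GroupIndex  // index of this transaction within the group
int 1
-               // index of the preceding transaction; fails if this is index 0
gtxns TypeEnum  // its transaction type
```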
-## gtxnsa f i
+## gtxnsa
-- Opcode: 0x39 {uint8 transaction field index} {uint8 transaction field array index}
+- Syntax: `gtxnsa F I` ∋ F: [txna](#field-group-txna), I: transaction field array index
+- Bytecode: 0x39 {uint8}, {uint8}
- Stack: ..., A: uint64 &rarr; ..., any
- Ith value of the array field F from the Ath transaction in the current group<br />`gtxnsa` can be called using `gtxns` with 2 immediates.
- Availability: v3
-## gload t i
+## gload
-- Opcode: 0x3a {uint8 transaction group index} {uint8 position in scratch space to load from}
+- Syntax: `gload T I` ∋ T: transaction group index, I: position in scratch space to load from
+- Bytecode: 0x3a {uint8}, {uint8}
- Stack: ... &rarr; ..., any
- Ith scratch space value of the Tth transaction in the current group
- Availability: v4
@@ -524,9 +550,10 @@ for notes on transaction fields available, see `txn`. If top of stack is _i_, `g
`gload` fails unless the requested transaction is an ApplicationCall and T < GroupIndex.
-## gloads i
+## gloads
-- Opcode: 0x3b {uint8 position in scratch space to load from}
+- Syntax: `gloads I` ∋ I: position in scratch space to load from
+- Bytecode: 0x3b {uint8}
- Stack: ..., A: uint64 &rarr; ..., any
- Ith scratch space value of the Ath transaction in the current group
- Availability: v4
@@ -534,9 +561,10 @@ for notes on transaction fields available, see `txn`. If top of stack is _i_, `g
`gloads` fails unless the requested transaction is an ApplicationCall and A < GroupIndex.
-## gaid t
+## gaid
-- Opcode: 0x3c {uint8 transaction group index}
+- Syntax: `gaid T` ∋ T: transaction group index
+- Bytecode: 0x3c {uint8}
- Stack: ... &rarr; ..., uint64
- ID of the asset or application created in the Tth transaction of the current group
- Availability: v4
@@ -546,7 +574,7 @@ for notes on transaction fields available, see `txn`. If top of stack is _i_, `g
## gaids
-- Opcode: 0x3d
+- Bytecode: 0x3d
- Stack: ..., A: uint64 &rarr; ..., uint64
- ID of the asset or application created in the Ath transaction of the current group
- Availability: v4
@@ -556,21 +584,22 @@ for notes on transaction fields available, see `txn`. If top of stack is _i_, `g
## loads
-- Opcode: 0x3e
+- Bytecode: 0x3e
- Stack: ..., A: uint64 &rarr; ..., any
- Ath scratch space value. All scratch spaces are 0 at program start.
- Availability: v5
## stores
-- Opcode: 0x3f
+- Bytecode: 0x3f
- Stack: ..., A: uint64, B &rarr; ...
- store B to the Ath scratch space
- Availability: v5
-## bnz target
+## bnz
-- Opcode: 0x40 {int16 branch offset, big-endian}
+- Syntax: `bnz TARGET` ∋ TARGET: branch offset
+- Bytecode: 0x40 {int16 (big-endian)}
- Stack: ..., A: uint64 &rarr; ...
- branch to TARGET if value A is not zero
@@ -578,18 +607,20 @@ The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which
Since v2, it is allowed to branch exactly to the end of the program, immediately after the last instruction: a `bnz` to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before v2, and is legal after it. This change eliminates the need for a final no-op instruction to serve as a branch target at the end. (Branching beyond the end, in other words to a byte larger than N, is still illegal and will cause the program to fail.)
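A minimal branching sketch:

```
int 1
bnz happy  // taken, since 1 is not zero
err        // skipped
happy:
int 1
return
```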
-## bz target
+## bz
-- Opcode: 0x41 {int16 branch offset, big-endian}
+- Syntax: `bz TARGET` ∋ TARGET: branch offset
+- Bytecode: 0x41 {int16 (big-endian)}
- Stack: ..., A: uint64 &rarr; ...
- branch to TARGET if value A is zero
- Availability: v2
See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.
-## b target
+## b
-- Opcode: 0x42 {int16 branch offset, big-endian}
+- Syntax: `b TARGET` ∋ TARGET: branch offset
+- Bytecode: 0x42 {int16 (big-endian)}
- Stack: ... &rarr; ...
- branch unconditionally to TARGET
- Availability: v2
@@ -598,119 +629,126 @@ See `bnz` for details on how branches work. `b` always jumps to the offset.
## return
-- Opcode: 0x43
+- Bytecode: 0x43
- Stack: ..., A: uint64 &rarr; _exits_
- use A as success value; end
- Availability: v2
## assert
-- Opcode: 0x44
+- Bytecode: 0x44
- Stack: ..., A: uint64 &rarr; ...
- immediately fail unless A is a non-zero number
- Availability: v3
-## bury n
+## bury
-- Opcode: 0x45 {uint8 depth}
+- Syntax: `bury N` ∋ N: depth
+- Bytecode: 0x45 {uint8}
- Stack: ..., A &rarr; ...
- replace the Nth value from the top of the stack with A. bury 0 fails.
- Availability: v8
-## popn n
+## popn
-- Opcode: 0x46 {uint8 stack depth}
+- Syntax: `popn N` ∋ N: stack depth
+- Bytecode: 0x46 {uint8}
- Stack: ..., [N items] &rarr; ...
- remove N values from the top of the stack
- Availability: v8
-## dupn n
+## dupn
-- Opcode: 0x47 {uint8 copy count}
+- Syntax: `dupn N` ∋ N: copy count
+- Bytecode: 0x47 {uint8}
- Stack: ..., A &rarr; ..., A, [N copies of A]
- duplicate A, N times
- Availability: v8
## pop
-- Opcode: 0x48
+- Bytecode: 0x48
- Stack: ..., A &rarr; ...
- discard A
## dup
-- Opcode: 0x49
+- Bytecode: 0x49
- Stack: ..., A &rarr; ..., A, A
- duplicate A
## dup2
-- Opcode: 0x4a
+- Bytecode: 0x4a
- Stack: ..., A, B &rarr; ..., A, B, A, B
- duplicate A and B
- Availability: v2
-## dig n
+## dig
-- Opcode: 0x4b {uint8 depth}
+- Syntax: `dig N` ∋ N: depth
+- Bytecode: 0x4b {uint8}
- Stack: ..., A, [N items] &rarr; ..., A, [N items], A
- Nth value from the top of the stack. dig 0 is equivalent to dup
- Availability: v3
## swap
-- Opcode: 0x4c
+- Bytecode: 0x4c
- Stack: ..., A, B &rarr; ..., B, A
- swaps A and B on stack
- Availability: v3
## select
-- Opcode: 0x4d
+- Bytecode: 0x4d
- Stack: ..., A, B, C: uint64 &rarr; ..., A or B
- selects one of two values based on top-of-stack: B if C != 0, else A
- Availability: v3
-## cover n
+## cover
-- Opcode: 0x4e {uint8 depth}
+- Syntax: `cover N` ∋ N: depth
+- Bytecode: 0x4e {uint8}
- Stack: ..., [N items], A &rarr; ..., A, [N items]
- remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth <= N.
- Availability: v5
-## uncover n
+## uncover
-- Opcode: 0x4f {uint8 depth}
+- Syntax: `uncover N` ∋ N: depth
+- Bytecode: 0x4f {uint8}
- Stack: ..., A, [N items] &rarr; ..., [N items], A
- remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. Fails if stack depth <= N.
- Availability: v5
## concat
-- Opcode: 0x50
+- Bytecode: 0x50
- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
- join A and B
- Availability: v2
`concat` fails if the result would be greater than 4096 bytes.
-## substring s e
+## substring
-- Opcode: 0x51 {uint8 start position} {uint8 end position}
+- Syntax: `substring S E` ∋ S: start position, E: end position
+- Bytecode: 0x51 {uint8}, {uint8}
- Stack: ..., A: []byte &rarr; ..., []byte
- A range of bytes from A starting at S up to but not including E. If E < S, or either is larger than the array length, the program fails
- Availability: v2
## substring3
-- Opcode: 0x52
+- Bytecode: 0x52
- Stack: ..., A: []byte, B: uint64, C: uint64 &rarr; ..., []byte
- A range of bytes from A starting at B up to but not including C. If C < B, or either is larger than the array length, the program fails
- Availability: v2
## getbit
-- Opcode: 0x53
+- Bytecode: 0x53
- Stack: ..., A, B: uint64 &rarr; ..., uint64
- Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails
- Availability: v3
@@ -719,7 +757,7 @@ see explanation of bit ordering in setbit
## setbit
-- Opcode: 0x54
+- Bytecode: 0x54
- Stack: ..., A, B: uint64, C: uint64 &rarr; ..., any
- Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails
- Availability: v3
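A worked sketch of the integer bit ordering (index 0 is the least significant bit):

```
int 0
int 3   // bit index
int 1   // new bit value
setbit  // yields 8
int 8
==
assert
```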
@@ -728,76 +766,81 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on
## getbyte
-- Opcode: 0x55
+- Bytecode: 0x55
- Stack: ..., A: []byte, B: uint64 &rarr; ..., uint64
- Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails
- Availability: v3
## setbyte
-- Opcode: 0x56
+- Bytecode: 0x56
- Stack: ..., A: []byte, B: uint64, C: uint64 &rarr; ..., []byte
- Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails
- Availability: v3
-## extract s l
+## extract
-- Opcode: 0x57 {uint8 start position} {uint8 length}
+- Syntax: `extract S L` ∋ S: start position, L: length
+- Bytecode: 0x57 {uint8}, {uint8}
- Stack: ..., A: []byte &rarr; ..., []byte
- A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails
- Availability: v5
## extract3
-- Opcode: 0x58
+- Bytecode: 0x58
- Stack: ..., A: []byte, B: uint64, C: uint64 &rarr; ..., []byte
- A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails<br />`extract3` can be called using `extract` with no immediates.
- Availability: v5
## extract_uint16
-- Opcode: 0x59
+- Bytecode: 0x59
- Stack: ..., A: []byte, B: uint64 &rarr; ..., uint64
- A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails
- Availability: v5
## extract_uint32
-- Opcode: 0x5a
+- Bytecode: 0x5a
- Stack: ..., A: []byte, B: uint64 &rarr; ..., uint64
- A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails
- Availability: v5
## extract_uint64
-- Opcode: 0x5b
+- Bytecode: 0x5b
- Stack: ..., A: []byte, B: uint64 &rarr; ..., uint64
- A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails
- Availability: v5
-## replace2 s
+## replace2
-- Opcode: 0x5c {uint8 start position}
+- Syntax: `replace2 S` ∋ S: start position
+- Bytecode: 0x5c {uint8}
- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
- Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)<br />`replace2` can be called using `replace` with 1 immediate.
- Availability: v7
## replace3
-- Opcode: 0x5d
+- Bytecode: 0x5d
- Stack: ..., A: []byte, B: uint64, C: []byte &rarr; ..., []byte
- Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)<br />`replace3` can be called using `replace` with no immediates.
- Availability: v7
-## base64_decode e
+## base64_decode
-- Opcode: 0x5e {uint8 encoding index}
+- Syntax: `base64_decode E` ∋ E: [base64](#field-group-base64)
+- Bytecode: 0x5e {uint8}
- Stack: ..., A: []byte &rarr; ..., []byte
- decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E
- **Cost**: 1 + 1 per 16 bytes of A
- Availability: v7
-`base64` Encodings:
+### base64
+
+Encodings
| Index | Name | Notes |
| - | ------ | --------- |
@@ -809,15 +852,18 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on
Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\n` and `\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\r`, or `\n`.
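A small sketch using the standard encoding (the literal is illustrative):

```
byte "aGVsbG8="            // base64 (StdEncoding) of "hello"
base64_decode StdEncoding
byte "hello"
==
assert
```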
-## json_ref r
+## json_ref
-- Opcode: 0x5f {uint8 return type index}
+- Syntax: `json_ref R` ∋ R: [json_ref](#field-group-json_ref)
+- Bytecode: 0x5f {uint8}
- Stack: ..., A: []byte, B: []byte &rarr; ..., any
- key B's value, of type R, from a [valid](jsonspec.md) utf-8 encoded json object A
- **Cost**: 25 + 2 per 7 bytes of A
- Availability: v7
-`json_ref` Types:
+### json_ref
+
+Types
| Index | Name | Type | Notes |
| - | ------ | -- | --------- |
@@ -832,18 +878,18 @@ Almost all smart contracts should use simpler and smaller methods (such as the [
## balance
-- Opcode: 0x60
+- Bytecode: 0x60
- Stack: ..., A &rarr; ..., uint64
- balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. Changes caused by inner transactions are observable immediately following `itxn_submit`
- Availability: v2
- Mode: Application
-params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.
+params: Txn.Accounts offset (or, since v4, an _available_ account address). Return: value.
## app_opted_in
-- Opcode: 0x61
-- Stack: ..., A, B: uint64 &rarr; ..., uint64
+- Bytecode: 0x61
+- Stack: ..., A, B: uint64 &rarr; ..., bool
- 1 if account A is opted in to application B, else 0
- Availability: v2
- Mode: Application
@@ -852,7 +898,7 @@ params: Txn.Accounts offset (or, since v4, an _available_ account address), _ava
## app_local_get
-- Opcode: 0x62
+- Bytecode: 0x62
- Stack: ..., A, B: []byte &rarr; ..., any
- local state of the key B in the current application in account A
- Availability: v2
@@ -862,8 +908,8 @@ params: Txn.Accounts offset (or, since v4, an _available_ account address), stat
## app_local_get_ex
-- Opcode: 0x63
-- Stack: ..., A, B: uint64, C: []byte &rarr; ..., X: any, Y: uint64
+- Bytecode: 0x63
+- Stack: ..., A, B: uint64, C: []byte &rarr; ..., X: any, Y: bool
- X is the local state of application B, key C in account A. Y is 1 if key existed, else 0
- Availability: v2
- Mode: Application
@@ -872,7 +918,7 @@ params: Txn.Accounts offset (or, since v4, an _available_ account address), _ava
## app_global_get
-- Opcode: 0x64
+- Bytecode: 0x64
- Stack: ..., A: []byte &rarr; ..., any
- global state of the key A in the current application
- Availability: v2
@@ -882,8 +928,8 @@ params: state key. Return: value. The value is zero (of type uint64) if the key
## app_global_get_ex
-- Opcode: 0x65
-- Stack: ..., A: uint64, B: []byte &rarr; ..., X: any, Y: uint64
+- Bytecode: 0x65
+- Stack: ..., A: uint64, B: []byte &rarr; ..., X: any, Y: bool
- X is the global state of application A, key B. Y is 1 if key existed, else 0
- Availability: v2
- Mode: Application
@@ -892,7 +938,7 @@ params: Txn.ForeignApps offset (or, since v4, an _available_ application id), st
## app_local_put
-- Opcode: 0x66
+- Bytecode: 0x66
- Stack: ..., A, B: []byte, C &rarr; ...
- write C to key B in account A's local state of the current application
- Availability: v2
@@ -902,7 +948,7 @@ params: Txn.Accounts offset (or, since v4, an _available_ account address), stat
## app_global_put
-- Opcode: 0x67
+- Bytecode: 0x67
- Stack: ..., A: []byte, B &rarr; ...
- write B to key A in the global state of the current application
- Availability: v2
@@ -910,7 +956,7 @@ params: Txn.Accounts offset (or, since v4, an _available_ account address), stat
## app_local_del
-- Opcode: 0x68
+- Bytecode: 0x68
- Stack: ..., A, B: []byte &rarr; ...
- delete key B from account A's local state of the current application
- Availability: v2
@@ -922,7 +968,7 @@ Deleting a key which is already absent has no effect on the application local st
## app_global_del
-- Opcode: 0x69
+- Bytecode: 0x69
- Stack: ..., A: []byte &rarr; ...
- delete key A from the global state of the current application
- Availability: v2
@@ -932,61 +978,70 @@ params: state key.
Deleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)
-## asset_holding_get f
+## asset_holding_get
-- Opcode: 0x70 {uint8 asset holding field index}
-- Stack: ..., A, B: uint64 &rarr; ..., X: any, Y: uint64
+- Syntax: `asset_holding_get F` ∋ F: [asset_holding](#field-group-asset_holding)
+- Bytecode: 0x70 {uint8}
+- Stack: ..., A, B: uint64 &rarr; ..., X: any, Y: bool
- X is field F from account A's holding of asset B. Y is 1 if A is opted into B, else 0
- Availability: v2
- Mode: Application
-`asset_holding` Fields:
+### asset_holding
+
+Fields
| Index | Name | Type | Notes |
| - | ------ | -- | --------- |
| 0 | AssetBalance | uint64 | Amount of the asset unit held by this account |
-| 1 | AssetFrozen | uint64 | Is the asset frozen or not |
+| 1 | AssetFrozen | bool | Is the asset frozen or not |
params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or, since v4, a Txn.ForeignAssets offset). Return: did_exist flag (1 if the asset existed and 0 otherwise), value.
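A sketch reading the sender's holding of a hypothetical asset (the ID is a placeholder and must be _available_):

```
txn Sender
int 1234567                     // hypothetical asset ID
asset_holding_get AssetBalance
assert                          // fail unless Sender is opted in
int 0
>                               // 1 if Sender holds a nonzero balance
return
```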
-## asset_params_get f
+## asset_params_get
-- Opcode: 0x71 {uint8 asset params field index}
-- Stack: ..., A: uint64 &rarr; ..., X: any, Y: uint64
+- Syntax: `asset_params_get F` ∋ F: [asset_params](#field-group-asset_params)
+- Bytecode: 0x71 {uint8}
+- Stack: ..., A: uint64 &rarr; ..., X: any, Y: bool
- X is field F from asset A. Y is 1 if A exists, else 0
- Availability: v2
- Mode: Application
-`asset_params` Fields:
+### asset_params
+
+Fields
| Index | Name | Type | In | Notes |
| - | ------ | -- | - | --------- |
| 0 | AssetTotal | uint64 | | Total number of units of this asset |
| 1 | AssetDecimals | uint64 | | See AssetParams.Decimals |
-| 2 | AssetDefaultFrozen | uint64 | | Frozen by default or not |
+| 2 | AssetDefaultFrozen | bool | | Frozen by default or not |
| 3 | AssetUnitName | []byte | | Asset unit name |
| 4 | AssetName | []byte | | Asset name |
| 5 | AssetURL | []byte | | URL with additional info about the asset |
-| 6 | AssetMetadataHash | []byte | | Arbitrary commitment |
-| 7 | AssetManager | []byte | | Manager address |
-| 8 | AssetReserve | []byte | | Reserve address |
-| 9 | AssetFreeze | []byte | | Freeze address |
-| 10 | AssetClawback | []byte | | Clawback address |
-| 11 | AssetCreator | []byte | v5 | Creator address |
+| 6 | AssetMetadataHash | [32]byte | | Arbitrary commitment |
+| 7 | AssetManager | address | | Manager address |
+| 8 | AssetReserve | address | | Reserve address |
+| 9 | AssetFreeze | address | | Freeze address |
+| 10 | AssetClawback | address | | Clawback address |
+| 11 | AssetCreator | address | v5 | Creator address |
params: Txn.ForeignAssets offset (or, since v4, an _available_ asset id). Return: did_exist flag (1 if the asset existed and 0 otherwise), value.
-## app_params_get f
+## app_params_get
-- Opcode: 0x72 {uint8 app params field index}
-- Stack: ..., A: uint64 &rarr; ..., X: any, Y: uint64
+- Syntax: `app_params_get F` ∋ F: [app_params](#field-group-app_params)
+- Bytecode: 0x72 {uint8}
+- Stack: ..., A: uint64 &rarr; ..., X: any, Y: bool
- X is field F from app A. Y is 1 if A exists, else 0
- Availability: v5
- Mode: Application
-`app_params` Fields:
+### app_params
+
+Fields
| Index | Name | Type | Notes |
| - | ------ | -- | --------- |
@@ -997,27 +1052,30 @@ params: Txn.ForeignAssets offset (or, since v4, an _available_ asset id. Return:
| 4 | AppLocalNumUint | uint64 | Number of uint64 values allowed in Local State |
| 5 | AppLocalNumByteSlice | uint64 | Number of byte array values allowed in Local State |
| 6 | AppExtraProgramPages | uint64 | Number of Extra Program Pages of code space |
-| 7 | AppCreator | []byte | Creator address |
-| 8 | AppAddress | []byte | Address for which this application has authority |
+| 7 | AppCreator | address | Creator address |
+| 8 | AppAddress | address | Address for which this application has authority |
params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag (1 if the application existed and 0 otherwise), value.
-## acct_params_get f
+## acct_params_get
-- Opcode: 0x73 {uint8 account params field index}
-- Stack: ..., A &rarr; ..., X: any, Y: uint64
+- Syntax: `acct_params_get F` ∋ F: [acct_params](#field-group-acct_params)
+- Bytecode: 0x73 {uint8}
+- Stack: ..., A &rarr; ..., X: any, Y: bool
- X is field F from account A. Y is 1 if A owns positive algos, else 0
- Availability: v6
- Mode: Application
-`acct_params` Fields:
+### acct_params
+
+Fields
| Index | Name | Type | In | Notes |
| - | ------ | -- | - | --------- |
| 0 | AcctBalance | uint64 | | Account balance in microalgos |
| 1 | AcctMinBalance | uint64 | | Minimum required balance for account, in microalgos |
-| 2 | AcctAuthAddr | []byte | | Address the account is rekeyed to. |
+| 2 | AcctAuthAddr | address | | Address the account is rekeyed to. |
| 3 | AcctTotalNumUint | uint64 | v8 | The total number of uint64 values allocated by this account in Global and Local States. |
| 4 | AcctTotalNumByteSlice | uint64 | v8 | The total number of byte array values allocated by this account in Global and Local States. |
| 5 | AcctTotalExtraAppPages | uint64 | v8 | The number of extra app code pages used by this account. |
@@ -1031,44 +1089,48 @@ params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag
## min_balance
-- Opcode: 0x78
+- Bytecode: 0x78
- Stack: ..., A &rarr; ..., uint64
- minimum required balance for account A, in microalgos. Required balance is affected by ASA, App, and Box usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change.
- Availability: v3
- Mode: Application
-params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.
+params: Txn.Accounts offset (or, since v4, an _available_ account address). Return: value.
-## pushbytes bytes
+## pushbytes
-- Opcode: 0x80 {varuint length} {bytes}
+- Syntax: `pushbytes BYTES` ∋ BYTES: a byte constant
+- Bytecode: 0x80 {varuint length, bytes}
- Stack: ... &rarr; ..., []byte
- immediate BYTES
- Availability: v3
`pushbytes` args are not added to the bytecblock during assembly.
-## pushint uint
+## pushint
-- Opcode: 0x81 {varuint int}
+- Syntax: `pushint UINT` ∋ UINT: an int constant
+- Bytecode: 0x81 {varuint}
- Stack: ... &rarr; ..., uint64
- immediate UINT
- Availability: v3
`pushint` args are not added to the intcblock during assembly.
-## pushbytess bytes ...
+## pushbytess
-- Opcode: 0x82 {varuint count} [({varuint length} bytes), ...]
+- Syntax: `pushbytess BYTES ...` ∋ BYTES ...: a list of byte constants
+- Bytecode: 0x82 {varuint count, [varuint length, bytes ...]}
- Stack: ... &rarr; ..., [N items]
- push sequences of immediate byte arrays to the stack (first byte array being deepest)
- Availability: v8
`pushbytess` args are not added to the bytecblock during assembly.
-## pushints uint ...
+## pushints
-- Opcode: 0x83 {varuint count} [{varuint value}, ...]
+- Syntax: `pushints UINT ...` ∋ UINT ...: a list of int constants
+- Bytecode: 0x83 {varuint count, [varuint ...]}
- Stack: ... &rarr; ..., [N items]
- push a sequence of immediate uints to the stack in the order they appear (first uint being deepest)
- Availability: v8
@@ -1077,15 +1139,16 @@ pushints args are not added to the intcblock during assembly processes
## ed25519verify_bare
-- Opcode: 0x84
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., uint64
+- Bytecode: 0x84
+- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., bool
- for (data A, signature B, pubkey C) verify the signature of the data against the pubkey => {0 or 1}
- **Cost**: 1900
- Availability: v7
-## callsub target
+## callsub
-- Opcode: 0x88 {int16 branch offset, big-endian}
+- Syntax: `callsub TARGET` ∋ TARGET: branch offset
+- Bytecode: 0x88 {int16 (big-endian)}
- Stack: ... &rarr; ...
- branch unconditionally to TARGET, saving the next instruction on the call stack
- Availability: v4
@@ -1094,46 +1157,51 @@ The call stack is separate from the data stack. Only `callsub`, `retsub`, and `p
## retsub
-- Opcode: 0x89
+- Bytecode: 0x89
- Stack: ... &rarr; ...
- pop the top instruction from the call stack and branch to it
- Availability: v4
If the current frame was prepared by `proto A R`, `retsub` will remove the `A` arguments from the stack, move the `R` return values down, and pop any stack locations above the relocated return values.
-## proto a r
+## proto
-- Opcode: 0x8a {uint8 arguments} {uint8 return values}
+- Syntax: `proto A R` ∋ A: number of arguments, R: number of return values
+- Bytecode: 0x8a {uint8}, {uint8}
- Stack: ... &rarr; ...
- Prepare top call frame for a retsub that will assume A args and R return values.
- Availability: v8
Fails unless the last instruction executed was a `callsub`.
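A sketch of a two-argument subroutine using `callsub`, `proto`, `frame_dig`, and `retsub` together:

```
#pragma version 8
int 3
int 4
callsub add   // leaves the single return value, 7
int 7
==
return

add:
proto 2 1     // two arguments, one return value
frame_dig -2  // first argument (3)
frame_dig -1  // second argument (4)
+
retsub        // pops the arguments, leaves the sum
```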
-## frame_dig i
+## frame_dig
-- Opcode: 0x8b {int8 frame slot}
+- Syntax: `frame_dig I` ∋ I: frame slot
+- Bytecode: 0x8b {int8}
- Stack: ... &rarr; ..., any
- Nth (signed) value from the frame pointer.
- Availability: v8
-## frame_bury i
+## frame_bury
-- Opcode: 0x8c {int8 frame slot}
+- Syntax: `frame_bury I` ∋ I: frame slot
+- Bytecode: 0x8c {int8}
- Stack: ..., A &rarr; ...
- replace the Nth (signed) value from the frame pointer in the stack with A
- Availability: v8
-## switch target ...
+## switch
-- Opcode: 0x8d {uint8 branch count} [{int16 branch offset, big-endian}, ...]
+- Syntax: `switch TARGET ...` ∋ TARGET ...: list of labels
+- Bytecode: 0x8d {varuint count, [int16 (big-endian) ...]}
- Stack: ..., A: uint64 &rarr; ...
- branch to the Ath label. Continue at following instruction if index A exceeds the number of labels.
- Availability: v8
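A sketch of indexed branching:

```
int 1
switch lbl0 lbl1  // A = 1, so branch to lbl1
err               // reached only if A exceeds the label count
lbl0:
int 0
return
lbl1:
int 1
return
```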
-## match target ...
+## match
-- Opcode: 0x8e {uint8 branch count} [{int16 branch offset, big-endian}, ...]
+- Syntax: `match TARGET ...` ∋ TARGET ...: list of labels
+- Bytecode: 0x8e {varuint count, [int16 (big-endian) ...]}
- Stack: ..., [A1, A2, ..., AN], B &rarr; ...
- given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found.
- Availability: v8
@@ -1142,21 +1210,21 @@ Fails unless the last instruction executed was a `callsub`.
## shl
-- Opcode: 0x90
+- Bytecode: 0x90
- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
- A times 2^B, modulo 2^64
- Availability: v4
## shr
-- Opcode: 0x91
+- Bytecode: 0x91
- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
- A divided by 2^B
- Availability: v4
## sqrt
-- Opcode: 0x92
+- Bytecode: 0x92
- Stack: ..., A: uint64 &rarr; ..., uint64
- The largest integer I such that I^2 <= A
- **Cost**: 4
@@ -1164,7 +1232,7 @@ Fails unless the last instruction executed was a `callsub`.
## bitlen
-- Opcode: 0x93
+- Bytecode: 0x93
- Stack: ..., A &rarr; ..., uint64
- The highest set bit in A. If A is a byte-array, it is interpreted as a big-endian unsigned integer. bitlen of 0 is 0, bitlen of 8 is 4
- Availability: v4
@@ -1173,14 +1241,14 @@ bitlen interprets arrays as big-endian integers, unlike setbit/getbit
## exp
-- Opcode: 0x94
+- Bytecode: 0x94
- Stack: ..., A: uint64, B: uint64 &rarr; ..., uint64
- A raised to the Bth power. Fail if A == B == 0 and on overflow
- Availability: v4
## expw
-- Opcode: 0x95
+- Bytecode: 0x95
- Stack: ..., A: uint64, B: uint64 &rarr; ..., X: uint64, Y: uint64
- A raised to the Bth power as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low. Fail if A == B == 0 or if the results exceeds 2^128-1
- **Cost**: 10
@@ -1188,7 +1256,7 @@ bitlen interprets arrays as big-endian integers, unlike setbit/getbit
## bsqrt
-- Opcode: 0x96
+- Bytecode: 0x96
- Stack: ..., A: []byte &rarr; ..., []byte
- The largest integer I such that I^2 <= A. A and I are interpreted as big-endian unsigned integers
- **Cost**: 40
@@ -1196,7 +1264,7 @@ bitlen interprets arrays as big-endian integers, unlike setbit/getbit
## divw
-- Opcode: 0x97
+- Bytecode: 0x97
- Stack: ..., A: uint64, B: uint64, C: uint64 &rarr; ..., uint64
- A,B / C. Fail if C == 0 or if result overflows.
- Availability: v6
@@ -1205,7 +1273,7 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
## sha3_256
-- Opcode: 0x98
+- Bytecode: 0x98
- Stack: ..., A: []byte &rarr; ..., []byte
- SHA3_256 hash of value A, yields [32]byte
- **Cost**: 130
@@ -1213,81 +1281,81 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
## b+
-- Opcode: 0xa0
-- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
+- Bytecode: 0xa0
+- Stack: ..., A: bigint, B: bigint &rarr; ..., []byte
- A plus B. A and B are interpreted as big-endian unsigned integers
- **Cost**: 10
- Availability: v4
## b-
-- Opcode: 0xa1
-- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
+- Bytecode: 0xa1
+- Stack: ..., A: bigint, B: bigint &rarr; ..., bigint
- A minus B. A and B are interpreted as big-endian unsigned integers. Fail on underflow.
- **Cost**: 10
- Availability: v4
## b/
-- Opcode: 0xa2
-- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
+- Bytecode: 0xa2
+- Stack: ..., A: bigint, B: bigint &rarr; ..., bigint
- A divided by B (truncated division). A and B are interpreted as big-endian unsigned integers. Fail if B is zero.
- **Cost**: 20
- Availability: v4
## b*
-- Opcode: 0xa3
-- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
+- Bytecode: 0xa3
+- Stack: ..., A: bigint, B: bigint &rarr; ..., []byte
- A times B. A and B are interpreted as big-endian unsigned integers.
- **Cost**: 20
- Availability: v4
## b<
-- Opcode: 0xa4
-- Stack: ..., A: []byte, B: []byte &rarr; ..., uint64
+- Bytecode: 0xa4
+- Stack: ..., A: bigint, B: bigint &rarr; ..., bool
- 1 if A is less than B, else 0. A and B are interpreted as big-endian unsigned integers
- Availability: v4
## b>
-- Opcode: 0xa5
-- Stack: ..., A: []byte, B: []byte &rarr; ..., uint64
+- Bytecode: 0xa5
+- Stack: ..., A: bigint, B: bigint &rarr; ..., bool
- 1 if A is greater than B, else 0. A and B are interpreted as big-endian unsigned integers
- Availability: v4
## b<=
-- Opcode: 0xa6
-- Stack: ..., A: []byte, B: []byte &rarr; ..., uint64
+- Bytecode: 0xa6
+- Stack: ..., A: bigint, B: bigint &rarr; ..., bool
- 1 if A is less than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers
- Availability: v4
## b>=
-- Opcode: 0xa7
-- Stack: ..., A: []byte, B: []byte &rarr; ..., uint64
+- Bytecode: 0xa7
+- Stack: ..., A: bigint, B: bigint &rarr; ..., bool
- 1 if A is greater than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers
- Availability: v4
## b==
-- Opcode: 0xa8
-- Stack: ..., A: []byte, B: []byte &rarr; ..., uint64
+- Bytecode: 0xa8
+- Stack: ..., A: bigint, B: bigint &rarr; ..., bool
- 1 if A is equal to B, else 0. A and B are interpreted as big-endian unsigned integers
- Availability: v4
## b!=
-- Opcode: 0xa9
-- Stack: ..., A: []byte, B: []byte &rarr; ..., uint64
+- Bytecode: 0xa9
+- Stack: ..., A: bigint, B: bigint &rarr; ..., bool
- 0 if A is equal to B, else 1. A and B are interpreted as big-endian unsigned integers
- Availability: v4
## b%
-- Opcode: 0xaa
+- Bytecode: 0xaa
- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
- A modulo B. A and B are interpreted as big-endian unsigned integers. Fail if B is zero.
- **Cost**: 20
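
To make the big-endian interpretation concrete, a small sketch (operands invented):

```
byte 0x01ff    // 511
byte 0x02      // 2
b+             // 0x0201, i.e. 513; note b+ and b* may return results
               // longer than the 64-byte bigint input limit
```
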
@@ -1295,7 +1363,7 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
## b|
-- Opcode: 0xab
+- Bytecode: 0xab
- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
- A bitwise-or B. A and B are zero-left extended to the greater of their lengths
- **Cost**: 6
@@ -1303,7 +1371,7 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
## b&
-- Opcode: 0xac
+- Bytecode: 0xac
- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
- A bitwise-and B. A and B are zero-left extended to the greater of their lengths
- **Cost**: 6
@@ -1311,7 +1379,7 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
## b^
-- Opcode: 0xad
+- Bytecode: 0xad
- Stack: ..., A: []byte, B: []byte &rarr; ..., []byte
- A bitwise-xor B. A and B are zero-left extended to the greater of their lengths
- **Cost**: 6
@@ -1319,7 +1387,7 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
## b~
-- Opcode: 0xae
+- Bytecode: 0xae
- Stack: ..., A: []byte &rarr; ..., []byte
- A with all bits inverted
- **Cost**: 4
@@ -1327,14 +1395,14 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
## bzero
-- Opcode: 0xaf
+- Bytecode: 0xaf
- Stack: ..., A: uint64 &rarr; ..., []byte
- zero filled byte-array of length A
- Availability: v4
## log
-- Opcode: 0xb0
+- Bytecode: 0xb0
- Stack: ..., A: []byte &rarr; ...
- write A to log state of the current application
- Availability: v5
@@ -1344,7 +1412,7 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
## itxn_begin
-- Opcode: 0xb1
+- Bytecode: 0xb1
- Stack: ... &rarr; ...
- begin preparation of a new inner transaction in a new transaction group
- Availability: v5
@@ -1352,9 +1420,10 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
`itxn_begin` initializes Sender to the application address; Fee to the minimum allowable, taking into account MinTxnFee and credit from overpaying in earlier transactions; FirstValid/LastValid to the values in the invoking transaction, and all other fields to zero or empty values.
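
A minimal begin/field/submit sketch, assuming the application account is funded well enough to cover the inner payment and its fee:

```
itxn_begin
int pay
itxn_field TypeEnum
int 100000          // illustrative amount, in microalgos
itxn_field Amount
txn Sender
itxn_field Receiver
itxn_submit
```
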
-## itxn_field f
+## itxn_field
-- Opcode: 0xb2 {uint8 transaction field index}
+- Syntax: `itxn_field F` ∋ F: [txn](#field-group-txn)
+- Bytecode: 0xb2 {uint8}
- Stack: ..., A &rarr; ...
- set field F of the current inner transaction to A
- Availability: v5
@@ -1364,7 +1433,7 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
## itxn_submit
-- Opcode: 0xb3
+- Bytecode: 0xb3
- Stack: ... &rarr; ...
- execute the current inner transaction group. Fail if executing this group would exceed the inner transaction limit, or if any transaction in the group fails.
- Availability: v5
@@ -1372,17 +1441,19 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.
-## itxn f
+## itxn
-- Opcode: 0xb4 {uint8 transaction field index}
+- Syntax: `itxn F` ∋ F: [txn](#field-group-txn)
+- Bytecode: 0xb4 {uint8}
- Stack: ... &rarr; ..., any
- field F of the last inner transaction
- Availability: v5
- Mode: Application
-## itxna f i
+## itxna
-- Opcode: 0xb5 {uint8 transaction field index} {uint8 transaction field array index}
+- Syntax: `itxna F I` ∋ F: [txna](#field-group-txna), I: transaction field array index
+- Bytecode: 0xb5 {uint8}, {uint8}
- Stack: ... &rarr; ..., any
- Ith value of the array field F of the last inner transaction
- Availability: v5
@@ -1390,7 +1461,7 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
## itxn_next
-- Opcode: 0xb6
+- Bytecode: 0xb6
- Stack: ... &rarr; ...
- begin preparation of a new inner transaction in the same transaction group
- Availability: v6
@@ -1398,17 +1469,19 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
`itxn_next` initializes the transaction exactly as `itxn_begin` does
-## gitxn t f
+## gitxn
-- Opcode: 0xb7 {uint8 transaction group index} {uint8 transaction field index}
+- Syntax: `gitxn T F` ∋ T: transaction group index, F: [txn](#field-group-txn)
+- Bytecode: 0xb7 {uint8}, {uint8}
- Stack: ... &rarr; ..., any
- field F of the Tth transaction in the last inner group submitted
- Availability: v6
- Mode: Application
-## gitxna t f i
+## gitxna
-- Opcode: 0xb8 {uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}
+- Syntax: `gitxna T F I` ∋ T: transaction group index, F: [txna](#field-group-txna), I: transaction field array index
+- Bytecode: 0xb8 {uint8}, {uint8}, {uint8}
- Stack: ... &rarr; ..., any
- Ith value of the array field F from the Tth transaction in the last inner group submitted
- Availability: v6
@@ -1416,8 +1489,8 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
## box_create
-- Opcode: 0xb9
-- Stack: ..., A: []byte, B: uint64 &rarr; ..., uint64
+- Bytecode: 0xb9
+- Stack: ..., A: boxName, B: uint64 &rarr; ..., bool
- create a box named A, of length B. Fail if A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1
- Availability: v8
- Mode: Application
@@ -1426,40 +1499,40 @@ Newly created boxes are filled with 0 bytes. `box_create` will fail if the refer
## box_extract
-- Opcode: 0xba
-- Stack: ..., A: []byte, B: uint64, C: uint64 &rarr; ..., []byte
+- Bytecode: 0xba
+- Stack: ..., A: boxName, B: uint64, C: uint64 &rarr; ..., []byte
- read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.
- Availability: v8
- Mode: Application
## box_replace
-- Opcode: 0xbb
-- Stack: ..., A: []byte, B: uint64, C: []byte &rarr; ...
+- Bytecode: 0xbb
+- Stack: ..., A: boxName, B: uint64, C: []byte &rarr; ...
- write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.
- Availability: v8
- Mode: Application
## box_del
-- Opcode: 0xbc
-- Stack: ..., A: []byte &rarr; ..., uint64
+- Bytecode: 0xbc
+- Stack: ..., A: boxName &rarr; ..., bool
- delete box named A if it exists. Return 1 if A existed, 0 otherwise
- Availability: v8
- Mode: Application
## box_len
-- Opcode: 0xbd
-- Stack: ..., A: []byte &rarr; ..., X: uint64, Y: uint64
+- Bytecode: 0xbd
+- Stack: ..., A: boxName &rarr; ..., X: uint64, Y: bool
- X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0.
- Availability: v8
- Mode: Application
## box_get
-- Opcode: 0xbe
-- Stack: ..., A: []byte &rarr; ..., X: []byte, Y: uint64
+- Bytecode: 0xbe
+- Stack: ..., A: boxName &rarr; ..., X: []byte, Y: bool
- X is the contents of box A if A exists, else ''. Y is 1 if A exists, else 0.
- Availability: v8
- Mode: Application
@@ -1468,38 +1541,41 @@ For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `bo
## box_put
-- Opcode: 0xbf
-- Stack: ..., A: []byte, B: []byte &rarr; ...
+- Bytecode: 0xbf
+- Stack: ..., A: boxName, B: []byte &rarr; ...
- replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). Creates A if it does not exist
- Availability: v8
- Mode: Application
For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`
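
A minimal sketch of the box lifecycle; the name "counter" and the sizes are illustrative, and the box must appear in the transaction's box references:

```
byte "counter"
int 8
box_create               // 1 if newly created, 0 if it already existed
pop
byte "counter"
int 0
byte 0x0000000000000001
box_replace              // write 8 bytes at offset 0
```
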
-## txnas f
+## txnas
-- Opcode: 0xc0 {uint8 transaction field index}
+- Syntax: `txnas F` ∋ F: [txna](#field-group-txna)
+- Bytecode: 0xc0 {uint8}
- Stack: ..., A: uint64 &rarr; ..., any
- Ath value of the array field F of the current transaction
- Availability: v5
-## gtxnas t f
+## gtxnas
-- Opcode: 0xc1 {uint8 transaction group index} {uint8 transaction field index}
+- Syntax: `gtxnas T F` ∋ T: transaction group index, F: [txna](#field-group-txna)
+- Bytecode: 0xc1 {uint8}, {uint8}
- Stack: ..., A: uint64 &rarr; ..., any
- Ath value of the array field F from the Tth transaction in the current group
- Availability: v5
-## gtxnsas f
+## gtxnsas
-- Opcode: 0xc2 {uint8 transaction field index}
+- Syntax: `gtxnsas F` ∋ F: [txna](#field-group-txna)
+- Bytecode: 0xc2 {uint8}
- Stack: ..., A: uint64, B: uint64 &rarr; ..., any
- Bth value of the array field F from the Ath transaction in the current group
- Availability: v5
## args
-- Opcode: 0xc3
+- Bytecode: 0xc3
- Stack: ..., A: uint64 &rarr; ..., []byte
- Ath LogicSig argument
- Availability: v5
@@ -1507,37 +1583,42 @@ For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `bo
## gloadss
-- Opcode: 0xc4
+- Bytecode: 0xc4
- Stack: ..., A: uint64, B: uint64 &rarr; ..., any
- Bth scratch space value of the Ath transaction in the current group
- Availability: v6
- Mode: Application
-## itxnas f
+## itxnas
-- Opcode: 0xc5 {uint8 transaction field index}
+- Syntax: `itxnas F` ∋ F: [txna](#field-group-txna)
+- Bytecode: 0xc5 {uint8}
- Stack: ..., A: uint64 &rarr; ..., any
- Ath value of the array field F of the last inner transaction
- Availability: v6
- Mode: Application
-## gitxnas t f
+## gitxnas
-- Opcode: 0xc6 {uint8 transaction group index} {uint8 transaction field index}
+- Syntax: `gitxnas T F` ∋ T: transaction group index, F: [txna](#field-group-txna)
+- Bytecode: 0xc6 {uint8}, {uint8}
- Stack: ..., A: uint64 &rarr; ..., any
- Ath value of the array field F from the Tth transaction in the last inner group submitted
- Availability: v6
- Mode: Application
-## vrf_verify s
+## vrf_verify
-- Opcode: 0xd0 {uint8 parameters index}
-- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., X: []byte, Y: uint64
+- Syntax: `vrf_verify S` ∋ S: [vrf_verify](#field-group-vrf_verify)
+- Bytecode: 0xd0 {uint8}
+- Stack: ..., A: []byte, B: []byte, C: []byte &rarr; ..., X: []byte, Y: bool
- Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.
- **Cost**: 5700
- Availability: v7
-`vrf_verify` Standards:
+### vrf_verify
+
+Standards
| Index | Name | Notes |
| - | ------ | --------- |
@@ -1546,14 +1627,17 @@ For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `bo
`VrfAlgorand` is the VRF used in Algorand. It is ECVRF-ED25519-SHA512-Elligator2, specified in the IETF internet draft [draft-irtf-cfrg-vrf-03](https://datatracker.ietf.org/doc/draft-irtf-cfrg-vrf/03/).
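
A sketch of typical use, assuming the message, the 80-byte proof, and the 32-byte public key arrive as application arguments:

```
txna ApplicationArgs 0    // message
txna ApplicationArgs 1    // proof
txna ApplicationArgs 2    // VRF public key
vrf_verify VrfAlgorand    // X: 64-byte VRF output, Y: verified flag
assert                    // fail unless the proof verified; X remains on stack
```
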
-## block f
+## block
-- Opcode: 0xd1 {uint8 block field index}
+- Syntax: `block F` ∋ F: [block](#field-group-block)
+- Bytecode: 0xd1 {uint8}
- Stack: ..., A: uint64 &rarr; ..., any
- field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive)
- Availability: v7
-`block` Fields:
+### block
+
+Fields
| Index | Name | Type | Notes |
| - | ------ | -- | --------- |
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index f7979e682..86719cb6b 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -48,14 +48,13 @@ type Writer interface {
}
type labelReference struct {
- sourceLine int
-
- // position of the label reference
+ // position (PC) of the label reference
position int
- label string
+ // token holding the label name (and line, column)
+ label token
- // ending positions of the opcode containing the label reference.
+ // ending position of the opcode containing the label reference.
offsetPosition int
}
@@ -108,7 +107,7 @@ func (ref intReference) length(ops *OpStream, assembled []byte) (int, error) {
case opIntc:
return 2, nil
default:
- return 0, ops.lineErrorf(ops.OffsetToLine[ref.position], "Unexpected op at intReference: %d", assembled[ref.position])
+ return 0, errorLinef(ops.OffsetToLine[ref.position], "unexpected op at intReference: %d", assembled[ref.position])
}
}
@@ -177,7 +176,7 @@ func (ref byteReference) length(ops *OpStream, assembled []byte) (int, error) {
case opBytec:
return 2, nil
default:
- return 0, ops.lineErrorf(ops.OffsetToLine[ref.position], "Unexpected op at byteReference: %d", assembled[ref.position])
+ return 0, errorLinef(ops.OffsetToLine[ref.position], "unexpected op at byteReference: %d", assembled[ref.position])
}
}
@@ -219,9 +218,9 @@ func (ref byteReference) makeNewReference(ops *OpStream, singleton bool, newInde
type OpStream struct {
Version uint64
Trace *strings.Builder
- Warnings []error // informational warnings, shouldn't stop assembly
- Errors []lineError // errors that should prevent final assembly
- Program []byte // Final program bytes. Will stay nil if any errors
+ Warnings []error // informational warnings, shouldn't stop assembly
+ Errors []sourceError // errors that should prevent final assembly
+ Program []byte // Final program bytes. Will stay nil if any errors
// Running bytes as they are assembled. jumps must be resolved
// and cblocks added before these bytes become a legal program.
@@ -258,7 +257,7 @@ type OpStream struct {
// Need new copy for each opstream
versionedPseudoOps map[string]map[int]OpSpec
- macros map[string][]string
+ macros map[string][]token
}
// newOpStream constructs OpStream instances ready to invoke assemble. A new
@@ -269,12 +268,12 @@ func newOpStream(version uint64) OpStream {
OffsetToLine: make(map[int]int),
typeTracking: true,
Version: version,
- macros: make(map[string][]string),
+ macros: make(map[string][]token),
known: ProgramKnowledge{fp: -1},
}
for i := range o.known.scratchSpace {
- o.known.scratchSpace[i] = StackUint64
+ o.known.scratchSpace[i] = StackZeroUint64
}
return o
@@ -312,7 +311,7 @@ type ProgramKnowledge struct {
func (pgm *ProgramKnowledge) top() (StackType, bool) {
if len(pgm.stack) == 0 {
- return pgm.bottom, pgm.bottom != StackNone
+ return pgm.bottom, pgm.bottom.AVMType != avmNone
}
last := len(pgm.stack) - 1
return pgm.stack[last], true
@@ -357,9 +356,10 @@ func (pgm *ProgramKnowledge) reset() {
// createLabel inserts a label to point to the next instruction, reporting an
// error for a duplicate.
-func (ops *OpStream) createLabel(label string) {
+func (ops *OpStream) createLabel(withColon token) {
+ label := strings.TrimSuffix(withColon.str, ":")
if _, ok := ops.labels[label]; ok {
- ops.errorf("duplicate label %#v", label)
+ ops.record(withColon.errorf("duplicate label %#v", label))
}
ops.labels[label] = ops.pending.Len()
ops.known.label()
@@ -371,11 +371,11 @@ func (ops *OpStream) recordSourceLine() {
}
// referToLabel records an opcode label reference to resolve later
-func (ops *OpStream) referToLabel(pc int, label string, offsetPosition int) {
- ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label, offsetPosition})
+func (ops *OpStream) referToLabel(pc int, label token, offsetPosition int) {
+ ops.labelReferences = append(ops.labelReferences, labelReference{pc, label, offsetPosition})
}
-type refineFunc func(pgm *ProgramKnowledge, immediates []string) (StackTypes, StackTypes, error)
+type refineFunc func(pgm *ProgramKnowledge, immediates []token) (StackTypes, StackTypes, error)
// returns allows opcodes like `txn` to be specific about their return value
// types, based on the field requested, rather than use Any as specified by
@@ -387,7 +387,7 @@ func (ops *OpStream) returns(spec *OpSpec, replacement StackType) {
end := len(ops.known.stack)
tip := ops.known.stack[end-len(spec.Return.Types):]
for i := range tip {
- if tip[i] == StackAny {
+ if tip[i].AVMType == avmAny {
tip[i] = replacement
return
}
@@ -396,8 +396,8 @@ func (ops *OpStream) returns(spec *OpSpec, replacement StackType) {
panic(fmt.Sprintf("%+v", spec))
}
-// Intc writes opcodes for loading a uint64 constant onto the stack.
-func (ops *OpStream) Intc(constIndex uint) {
+// writeIntc writes opcodes for loading a uint64 constant onto the stack.
+func (ops *OpStream) writeIntc(constIndex uint) error {
switch constIndex {
case 0:
ops.pending.WriteByte(OpsByName[ops.Version]["intc_0"].Opcode)
@@ -409,20 +409,20 @@ func (ops *OpStream) Intc(constIndex uint) {
ops.pending.WriteByte(OpsByName[ops.Version]["intc_3"].Opcode)
default:
if constIndex > 0xff {
- ops.error("cannot have more than 256 int constants")
+ return errors.New("cannot have more than 256 int constants")
}
ops.pending.WriteByte(OpsByName[ops.Version]["intc"].Opcode)
ops.pending.WriteByte(uint8(constIndex))
}
if constIndex >= uint(len(ops.intc)) {
- ops.errorf("intc %d is not defined", constIndex)
- } else {
- ops.trace("intc %d: %d", constIndex, ops.intc[constIndex])
+ return fmt.Errorf("intc %d is not defined", constIndex)
}
+ ops.trace("intc %d: %d", constIndex, ops.intc[constIndex])
+ return nil
}
-// IntLiteral writes opcodes for loading a uint literal
-func (ops *OpStream) IntLiteral(val uint64) {
+// intLiteral writes opcodes for loading a uint literal
+func (ops *OpStream) intLiteral(val uint64) error {
ops.hasPseudoInt = true
found := false
@@ -437,7 +437,7 @@ func (ops *OpStream) IntLiteral(val uint64) {
if !found {
if ops.cntIntcBlock > 0 {
- ops.errorf("int %d used without %d in intcblock", val, val)
+ return fmt.Errorf("value %d does not appear in existing intcblock", val)
}
constIndex = uint(len(ops.intc))
ops.intc = append(ops.intc, val)
@@ -446,11 +446,11 @@ func (ops *OpStream) IntLiteral(val uint64) {
value: val,
position: ops.pending.Len(),
})
- ops.Intc(constIndex)
+ return ops.writeIntc(constIndex)
}
-// Bytec writes opcodes for loading a []byte constant onto the stack.
-func (ops *OpStream) Bytec(constIndex uint) {
+// writeBytec writes opcodes for loading a []byte constant onto the stack.
+func (ops *OpStream) writeBytec(constIndex uint) error {
switch constIndex {
case 0:
ops.pending.WriteByte(OpsByName[ops.Version]["bytec_0"].Opcode)
@@ -462,21 +462,21 @@ func (ops *OpStream) Bytec(constIndex uint) {
ops.pending.WriteByte(OpsByName[ops.Version]["bytec_3"].Opcode)
default:
if constIndex > 0xff {
- ops.error("cannot have more than 256 byte constants")
+ return errors.New("cannot have more than 256 byte constants")
}
ops.pending.WriteByte(OpsByName[ops.Version]["bytec"].Opcode)
ops.pending.WriteByte(uint8(constIndex))
}
if constIndex >= uint(len(ops.bytec)) {
- ops.errorf("bytec %d is not defined", constIndex)
- } else {
- ops.trace("bytec %d %s", constIndex, hex.EncodeToString(ops.bytec[constIndex]))
+ return fmt.Errorf("bytec %d is not defined", constIndex)
}
+ ops.trace("bytec %d %s", constIndex, hex.EncodeToString(ops.bytec[constIndex]))
+ return nil
}
-// ByteLiteral writes opcodes and data for loading a []byte literal
+// byteLiteral writes opcodes and data for loading a []byte literal
// Values are accumulated so that they can be put into a bytecblock
-func (ops *OpStream) ByteLiteral(val []byte) {
+func (ops *OpStream) byteLiteral(val []byte) error {
ops.hasPseudoByte = true
found := false
@@ -490,7 +490,7 @@ func (ops *OpStream) ByteLiteral(val []byte) {
}
if !found {
if ops.cntBytecBlock > 0 {
- ops.errorf("byte/addr/method used without value in bytecblock")
+ return fmt.Errorf("value 0x%x does not appear in existing bytecblock", val)
}
constIndex = uint(len(ops.bytec))
ops.bytec = append(ops.bytec, val)
@@ -499,21 +499,21 @@ func (ops *OpStream) ByteLiteral(val []byte) {
value: val,
position: ops.pending.Len(),
})
- ops.Bytec(constIndex)
+ return ops.writeBytec(constIndex)
}
-func asmInt(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
+func asmInt(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ if err := ops.checkArgCount(spec.Name, mnemonic, args, 1); err != nil {
+ return err
}
// After backBranchEnabledVersion, control flow is confusing, so if there's
// a manual cblock, use push instead of trying to use what's given.
if ops.cntIntcBlock > 0 && ops.Version >= backBranchEnabledVersion {
// We don't understand control-flow, so use pushint
- ops.warnf("int %s used with explicit intcblock. must pushint", args[0])
+ ops.warn(args[0], "int %s used with explicit intcblock. must pushint", args[0].str)
pushint := OpsByName[ops.Version]["pushint"]
- return asmPushInt(ops, &pushint, args)
+ return asmPushInt(ops, &pushint, mnemonic, args)
}
// There are no backjumps, but there are multiple cblocks. Maybe one is
@@ -521,65 +521,71 @@ func asmInt(ops *OpStream, spec *OpSpec, args []string) error {
if ops.cntIntcBlock > 1 {
pushint, ok := OpsByName[ops.Version]["pushint"]
if ok {
- return asmPushInt(ops, &pushint, args)
+ return asmPushInt(ops, &pushint, mnemonic, args)
}
- return ops.errorf("int %s used with manual intcblocks. Use intc.", args[0])
+ return mnemonic.errorf("int %s used with manual intcblocks. Use intc.", args[0].str)
}
// In both of the above clauses, we _could_ track whether a particular
// intcblock dominates the current instruction. If so, we could use it.
// check txn type constants
- i, ok := txnTypeMap[args[0]]
- if ok {
- ops.IntLiteral(i)
- return nil
+ i, ok := txnTypeMap[args[0].str]
+ if !ok {
+ // check OnCompletion constants
+ i, ok = onCompletionMap[args[0].str]
}
- // check OnCompletion constants
- oc, isOCStr := onCompletionMap[args[0]]
- if isOCStr {
- ops.IntLiteral(oc)
- return nil
+ if !ok {
+ val, err := strconv.ParseUint(args[0].str, 0, 64)
+ if err != nil {
+ return args[0].errorf("unable to parse %#v as integer", args[0].str)
+ }
+ i = val
}
- val, err := strconv.ParseUint(args[0], 0, 64)
+ err := ops.intLiteral(i)
if err != nil {
- return ops.error(err)
+ return args[0].error(err)
}
- ops.IntLiteral(val)
return nil
}
// Explicit invocation of const lookup and push
-func asmIntC(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
+func asmIntC(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ if err := ops.checkArgCount(spec.Name, mnemonic, args, 1); err != nil {
+ return err
+ }
+ constIndex, err := byteImm(args[0].str, "constant")
+ if err != nil {
+ return args[0].error(err)
}
- constIndex, err := byteImm(args[0], "constant")
+ err = ops.writeIntc(uint(constIndex))
if err != nil {
- return ops.error(err)
+ return args[0].error(err)
}
- ops.Intc(uint(constIndex))
return nil
}
-func asmByteC(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
+func asmByteC(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ if err := ops.checkArgCount(spec.Name, mnemonic, args, 1); err != nil {
+ return err
}
- constIndex, err := byteImm(args[0], "constant")
+ constIndex, err := byteImm(args[0].str, "constant")
if err != nil {
- return ops.error(err)
+ return args[0].error(err)
+ }
+ err = ops.writeBytec(uint(constIndex))
+ if err != nil {
+ return args[0].error(err)
}
- ops.Bytec(uint(constIndex))
return nil
}
-func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
+func asmPushInt(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ if err := ops.checkArgCount(spec.Name, mnemonic, args, 1); err != nil {
+ return err
}
- val, err := strconv.ParseUint(args[0], 0, 64)
+ val, err := strconv.ParseUint(args[0].str, 0, 64)
if err != nil {
- return ops.error(err)
+ return args[0].errorf("unable to parse %#v as integer", args[0].str)
}
ops.pending.WriteByte(spec.Opcode)
var scratch [binary.MaxVarintLen64]byte
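
The cblock handling above has a simple assembly-level consequence, sketched here: once a manual `intcblock` appears (at or after backBranchEnabledVersion), a later `int` is assembled as `pushint` rather than as a guessed constant reference:

```
#pragma version 8
intcblock 0 1
int 5      // warns "used with explicit intcblock", assembles as pushint 5
intc_1     // explicit constant access is still fine
```
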
@@ -588,22 +594,24 @@ func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func asmPushInts(ops *OpStream, spec *OpSpec, args []string) error {
+func asmPushInts(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
ops.pending.WriteByte(spec.Opcode)
_, err := asmIntImmArgs(ops, args)
return err
}
-func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
+func asmPushBytes(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ // asmPushBytes is sometimes used to assemble the "byte" mnemonic, so use
+ // mnemonic.str instead of spec.Name when reporting errors.
if len(args) == 0 {
- return ops.errorf("%s needs byte literal argument", spec.Name)
+ return mnemonic.errorAfterf("%s needs byte literal argument", mnemonic.str)
}
val, consumed, err := parseBinaryArgs(args)
if err != nil {
- return ops.error(err)
+ return args[consumed].errorf("%s %w", mnemonic.str, err)
}
if len(args) != consumed {
- return ops.errorf("%s with extraneous argument", spec.Name)
+ return args[consumed].errorf("%s with extraneous argument", mnemonic.str)
}
ops.pending.WriteByte(spec.Opcode)
var scratch [binary.MaxVarintLen64]byte
@@ -613,9 +621,9 @@ func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func asmPushBytess(ops *OpStream, spec *OpSpec, args []string) error {
+func asmPushBytess(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
ops.pending.WriteByte(spec.Opcode)
- _, err := asmByteImmArgs(ops, args)
+ _, err := asmByteImmArgs(ops, spec, args)
return err
}
@@ -632,66 +640,73 @@ func base32DecodeAnyPadding(x string) (val []byte, err error) {
return
}
-func parseBinaryArgs(args []string) (val []byte, consumed int, err error) {
- arg := args[0]
+// parseBinaryArgs parses a byte literal argument. It returns the argument,
+// interpreted into raw bytes, and the number of tokens consumed.
+func parseBinaryArgs(args []token) ([]byte, int, error) {
+ arg := args[0].str
if strings.HasPrefix(arg, "base32(") || strings.HasPrefix(arg, "b32(") {
open := strings.IndexRune(arg, '(')
close := strings.IndexRune(arg, ')')
if close == -1 {
- err = errors.New("byte base32 arg lacks close paren")
- return
+ return nil, 0, fmt.Errorf("argument %s lacks closing parenthesis", arg)
}
- val, err = base32DecodeAnyPadding(arg[open+1 : close])
+ if close != len(arg)-1 {
+ return nil, 0, fmt.Errorf("argument %s must end at first closing parenthesis", arg)
+ }
+ val, err := base32DecodeAnyPadding(arg[open+1 : close])
if err != nil {
- return
+ if cie, ok := err.(base32.CorruptInputError); ok {
+ return nil, 0, base32.CorruptInputError(int64(cie) + int64(open) + 1)
+ }
+ return nil, 0, err
}
- consumed = 1
+ return val, 1, nil
} else if strings.HasPrefix(arg, "base64(") || strings.HasPrefix(arg, "b64(") {
open := strings.IndexRune(arg, '(')
close := strings.IndexRune(arg, ')')
if close == -1 {
- err = errors.New("byte base64 arg lacks close paren")
- return
+ return nil, 0, fmt.Errorf("argument %s lacks closing parenthesis", arg)
+ }
+ if close != len(arg)-1 {
+ return nil, 0, fmt.Errorf("argument %s must end at first closing parenthesis", arg)
}
- val, err = base64.StdEncoding.DecodeString(arg[open+1 : close])
+ val, err := base64.StdEncoding.DecodeString(arg[open+1 : close])
if err != nil {
- return
+ return nil, 0, err
}
- consumed = 1
+ return val, 1, nil
} else if strings.HasPrefix(arg, "0x") {
- val, err = hex.DecodeString(arg[2:])
+ val, err := hex.DecodeString(arg[2:])
if err != nil {
- return
+ return nil, 0, err
}
- consumed = 1
+ return val, 1, nil
} else if arg == "base32" || arg == "b32" {
if len(args) < 2 {
- err = fmt.Errorf("need literal after 'byte %s'", arg)
- return
+ return nil, 0, fmt.Errorf("%s needs byte literal argument", arg)
}
- val, err = base32DecodeAnyPadding(args[1])
+ val, err := base32DecodeAnyPadding(args[1].str)
if err != nil {
- return
+ return nil, 1, err // return 1, so that the right token is blamed
}
- consumed = 2
+ return val, 2, nil
} else if arg == "base64" || arg == "b64" {
if len(args) < 2 {
- err = fmt.Errorf("need literal after 'byte %s'", arg)
- return
+ return nil, 0, fmt.Errorf("%s needs byte literal argument", arg)
}
- val, err = base64.StdEncoding.DecodeString(args[1])
+ val, err := base64.StdEncoding.DecodeString(args[1].str)
if err != nil {
- return
+ return nil, 1, err
}
- consumed = 2
+ return val, 2, nil
} else if len(arg) > 1 && arg[0] == '"' && arg[len(arg)-1] == '"' {
- val, err = parseStringLiteral(arg)
- consumed = 1
- } else {
- err = fmt.Errorf("byte arg did not parse: %v", arg)
- return
+ val, err := parseStringLiteral(arg)
+ if err != nil {
+ return nil, 0, err
+ }
+ return val, 1, err
}
- return
+ return nil, 0, fmt.Errorf("arg did not parse: %v", arg)
}
func parseStringLiteral(input string) (result []byte, err error) {
@@ -736,7 +751,7 @@ func parseStringLiteral(input string) (result []byte, err error) {
pos++
continue
default:
- return nil, fmt.Errorf("invalid escape seq \\%c", char)
+ return nil, fmt.Errorf("invalid escape sequence \\%c", char)
}
}
if hexSeq {
@@ -766,28 +781,29 @@ func parseStringLiteral(input string) (result []byte, err error) {
// byte {base64,b64,base32,b32} ...
// byte 0x....
// byte "this is a string\n"
-func asmByte(ops *OpStream, spec *OpSpec, args []string) error {
+func asmByte(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
if len(args) == 0 {
- return ops.errorf("%s needs byte literal argument", spec.Name)
+ return mnemonic.errorAfterf("%s needs byte literal argument", spec.Name)
}
// After backBranchEnabledVersion, control flow is confusing, so if there's
// a manual cblock, use push instead of trying to use what's given.
if ops.cntBytecBlock > 0 && ops.Version >= backBranchEnabledVersion {
// We don't understand control-flow, so use pushbytes
- ops.warnf("byte %s used with explicit bytecblock. must pushbytes", args[0])
- pushbytes := OpsByName[ops.Version]["pushbytes"]
- return asmPushBytes(ops, &pushbytes, args)
+ ops.warn(args[0], "byte %s used with explicit bytecblock. must pushbytes", args[0].str)
+ pushbytes := OpsByName[ops.Version]["pushbytes"] // make sure pushbytes opcode is written
+ return asmPushBytes(ops, &pushbytes, mnemonic, args)
}
// There are no backjumps, but there are multiple cblocks. Maybe one is
// conditional skipped. Too confusing.
if ops.cntBytecBlock > 1 {
+ // use pushbytes opcode if available
pushbytes, ok := OpsByName[ops.Version]["pushbytes"]
if ok {
- return asmPushBytes(ops, &pushbytes, args)
+ return asmPushBytes(ops, &pushbytes, mnemonic, args)
}
- return ops.errorf("byte %s used with manual bytecblocks. Use bytec.", args[0])
+ return args[0].errorf("byte %s used with manual bytecblocks. Use bytec.", args[0].str)
}
// In both of the above clauses, we _could_ track whether a particular
@@ -795,49 +811,55 @@ func asmByte(ops *OpStream, spec *OpSpec, args []string) error {
val, consumed, err := parseBinaryArgs(args)
if err != nil {
- return ops.error(err)
+ return args[consumed].errorf("%s %w", spec.Name, err)
}
if len(args) != consumed {
- return ops.errorf("%s with extraneous argument", spec.Name)
+ return args[consumed].errorf("%s with extraneous argument", spec.Name)
+ }
+ err = ops.byteLiteral(val)
+ if err != nil {
+ return args[0].error(err)
}
- ops.ByteLiteral(val)
return nil
}
// method "add(uint64,uint64)uint64"
-func asmMethod(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) == 0 {
- return ops.error("method requires a literal argument")
+func asmMethod(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ if err := ops.checkArgCount(spec.Name, mnemonic, args, 1); err != nil {
+ return err
}
- arg := args[0]
+ arg := args[0].str
if len(arg) > 1 && arg[0] == '"' && arg[len(arg)-1] == '"' {
methodSig, err := parseStringLiteral(arg)
if err != nil {
- return ops.error(err)
+ return args[0].error(err)
}
methodSigStr := string(methodSig)
err = abi.VerifyMethodSignature(methodSigStr)
if err != nil {
// Warn if an invalid signature is used. Don't return an error, since the ABI is not
// governed by the core protocol, so there may be changes to it that we don't know about
- ops.warnf("Invalid ARC-4 ABI method signature for method op: %s", err.Error())
+ ops.warn(args[0], "invalid ARC-4 ABI method signature for method op: %w", err)
}
hash := sha512.Sum512_256(methodSig)
- ops.ByteLiteral(hash[0:4])
+ err = ops.byteLiteral(hash[:4])
+ if err != nil {
+ return args[0].error(err)
+ }
return nil
}
- return ops.error("Unable to parse method signature")
+ return args[0].errorf("unable to parse method signature")
}
-func asmIntImmArgs(ops *OpStream, args []string) ([]uint64, error) {
+func asmIntImmArgs(ops *OpStream, args []token) ([]uint64, *sourceError) {
ivals := make([]uint64, len(args))
var scratch [binary.MaxVarintLen64]byte
l := binary.PutUvarint(scratch[:], uint64(len(args)))
ops.pending.Write(scratch[:l])
for i, xs := range args {
- cu, err := strconv.ParseUint(xs, 0, 64)
+ cu, err := strconv.ParseUint(xs.str, 0, 64)
if err != nil {
- ops.error(err)
+ ops.record(xs.error(err))
}
l = binary.PutUvarint(scratch[:], cu)
ops.pending.Write(scratch[:l])
@@ -847,7 +869,7 @@ func asmIntImmArgs(ops *OpStream, args []string) ([]uint64, error) {
return ivals, nil
}
-func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
+func asmIntCBlock(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
ops.pending.WriteByte(spec.Opcode)
ivals, err := asmIntImmArgs(ops, args)
if err != nil {
@@ -857,7 +879,7 @@ func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// If we previously processed an `int`, we thought we could insert our
// own intcblock, but now we see a manual one.
if ops.hasPseudoInt {
- ops.error("intcblock following int")
+ return mnemonic.errorf("intcblock following int")
}
ops.intcRefs = nil
ops.intc = ivals
@@ -867,7 +889,7 @@ func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func asmByteImmArgs(ops *OpStream, args []string) ([][]byte, error) {
+func asmByteImmArgs(ops *OpStream, spec *OpSpec, args []token) ([][]byte, *sourceError) {
bvals := make([][]byte, 0, len(args))
rest := args
for len(rest) > 0 {
@@ -877,7 +899,7 @@ func asmByteImmArgs(ops *OpStream, args []string) ([][]byte, error) {
// intcblock, but parseBinaryArgs would have
// to return a useful consumed value even in
// the face of errors. Hard.
- return nil, ops.error(err)
+ return nil, rest[0].errorf("%s %w", spec.Name, err)
}
bvals = append(bvals, val)
rest = rest[consumed:]
@@ -894,9 +916,9 @@ func asmByteImmArgs(ops *OpStream, args []string) ([][]byte, error) {
return bvals, nil
}
-func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
+func asmByteCBlock(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
ops.pending.WriteByte(spec.Opcode)
- bvals, err := asmByteImmArgs(ops, args)
+ bvals, err := asmByteImmArgs(ops, spec, args)
if err != nil {
return err
}
@@ -905,7 +927,7 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// If we previously processed a pseudo `byte`, we thought we could
// insert our own bytecblock, but now we see a manual one.
if ops.hasPseudoByte {
- ops.error("bytecblock following byte/addr/method")
+ return mnemonic.errorf("bytecblock following byte/addr/method")
}
ops.bytecRefs = nil
ops.bytec = bvals
@@ -916,25 +938,28 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// addr A1EU...
// parses base32-with-checksum account address strings into a byte literal
-func asmAddr(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
+func asmAddr(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ if err := ops.checkArgCount(spec.Name, mnemonic, args, 1); err != nil {
+ return err
+ }
+ addr, err := basics.UnmarshalChecksumAddress(args[0].str)
+ if err != nil {
+ return args[0].error(err)
}
- addr, err := basics.UnmarshalChecksumAddress(args[0])
+ err = ops.byteLiteral(addr[:])
if err != nil {
- return ops.error(err)
+ return args[0].error(err)
}
- ops.ByteLiteral(addr[:])
return nil
}
-func asmArg(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s needs one immediate argument, was given %d", spec.Name, len(args))
+func asmArg(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ if err := ops.checkArgCount(spec.Name, mnemonic, args, 1); err != nil {
+ return err
}
- val, err := byteImm(args[0], "argument")
+ val, err := byteImm(args[0].str, "argument")
if err != nil {
- return ops.error(err)
+ return args[0].error(err)
}
altSpec := *spec
if val < 4 {
@@ -948,14 +973,14 @@ func asmArg(ops *OpStream, spec *OpSpec, args []string) error {
case 3:
altSpec = OpsByName[ops.Version]["arg_3"]
}
- args = []string{}
+ args = []token{}
}
- return asmDefault(ops, &altSpec, args)
+ return asmDefault(ops, &altSpec, mnemonic, args)
}
-func asmBranch(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s needs a single label argument", spec.Name)
+func asmBranch(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ if err := ops.checkArgCount(spec.Name, mnemonic, args, 1); err != nil {
+ return err
}
ops.referToLabel(ops.pending.Len()+1, args[0], ops.pending.Len()+spec.Size)
@@ -966,10 +991,10 @@ func asmBranch(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func asmSwitch(ops *OpStream, spec *OpSpec, args []string) error {
+func asmSwitch(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
numOffsets := len(args)
if numOffsets > math.MaxUint8 {
- return ops.errorf("%s cannot take more than 255 labels", spec.Name)
+ return args[math.MaxUint8].errorf("%s cannot take more than 255 labels", spec.Name)
}
ops.pending.WriteByte(spec.Opcode)
ops.pending.WriteByte(byte(numOffsets))
@@ -983,16 +1008,16 @@ func asmSwitch(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func asmSubstring(ops *OpStream, spec *OpSpec, args []string) error {
- err := asmDefault(ops, spec, args)
+func asmSubstring(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ err := asmDefault(ops, spec, mnemonic, args)
if err != nil {
return err
}
// Having run asmDefault, only need to check extra constraints.
- start, _ := strconv.ParseUint(args[0], 0, 64)
- end, _ := strconv.ParseUint(args[1], 0, 64)
+ start, _ := strconv.ParseUint(args[0].str, 0, 64)
+ end, _ := strconv.ParseUint(args[1].str, 0, 64)
if end < start {
- return ops.error("substring end is before start")
+ return args[0].errorf("substring end is before start")
}
return nil
}
@@ -1016,58 +1041,74 @@ func int8Imm(value string, label string) (byte, error) {
return byte(res), err
}
-func asmItxn(ops *OpStream, spec *OpSpec, args []string) error {
+func asmItxn(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
if len(args) == 1 {
- return asmDefault(ops, spec, args)
+ return asmDefault(ops, spec, mnemonic, args)
}
if len(args) == 2 {
itxna := OpsByName[ops.Version]["itxna"]
- return asmDefault(ops, &itxna, args)
+ return asmDefault(ops, &itxna, mnemonic, args)
}
- return ops.errorf("%s expects 1 or 2 immediate arguments", spec.Name)
+ return mnemonic.errorf("%s expects 1 or 2 immediate arguments", spec.Name)
}
// asmGitxn substitutes gitxna's spec if there are 3 args
-func asmGitxn(ops *OpStream, spec *OpSpec, args []string) error {
+func asmGitxn(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
if len(args) == 2 {
- return asmDefault(ops, spec, args)
+ return asmDefault(ops, spec, mnemonic, args)
}
if len(args) == 3 {
itxna := OpsByName[ops.Version]["gitxna"]
- return asmDefault(ops, &itxna, args)
+ return asmDefault(ops, &itxna, mnemonic, args)
}
- return ops.errorf("%s expects 2 or 3 immediate arguments", spec.Name)
+ return mnemonic.errorf("%s expects 2 or 3 immediate arguments", spec.Name)
}
-func asmItxnField(ops *OpStream, spec *OpSpec, args []string) error {
- if len(args) != 1 {
- return ops.errorf("%s expects one argument", spec.Name)
+func asmItxnField(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ if err := ops.checkArgCount(spec.Name, mnemonic, args, 1); err != nil {
+ return err
}
- fs, ok := txnFieldSpecByName[args[0]]
+ fs, ok := txnFieldSpecByName[args[0].str]
if !ok {
- return ops.errorf("%s unknown field: %#v", spec.Name, args[0])
+ return args[0].errorf("%s unknown field: %#v", spec.Name, args[0].str)
}
if fs.itxVersion == 0 {
- return ops.errorf("%s %#v is not allowed.", spec.Name, args[0])
+ return args[0].errorf("%s %#v is not allowed.", spec.Name, args[0].str)
}
if fs.itxVersion > ops.Version {
- return ops.errorf("%s %s field was introduced in v%d. Missed #pragma version?", spec.Name, args[0], fs.itxVersion)
+ return args[0].errorf("%s %s field was introduced in v%d. Missed #pragma version?", spec.Name, args[0].str, fs.itxVersion)
}
ops.pending.WriteByte(spec.Opcode)
ops.pending.WriteByte(fs.Field())
return nil
}
-type asmFunc func(*OpStream, *OpSpec, []string) error
+type asmFunc func(*OpStream, *OpSpec, token, []token) *sourceError
-// Basic assembly. Any extra bytes of opcode are encoded as byte immediates.
-func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
- expected := len(spec.OpDetails.Immediates)
- if len(args) != expected {
+func (ops *OpStream) checkArgCount(name string, mnemonic token, args []token, expected int) *sourceError {
+ offered := len(args)
+ if offered != expected {
+ all := make([]token, len(args)+1)
+ all[0] = mnemonic
+ copy(all[1:], args)
+ line := all[offered].line
+ col := all[offered].col + len(all[offered].str) // end of last arg (or mnemonic)
+ if offered > expected {
+ line = all[expected+1].line
+ col = all[expected+1].col // start of first extra arg
+ }
if expected == 1 {
- return ops.errorf("%s expects 1 immediate argument", spec.Name)
+ return &sourceError{line, col, fmt.Errorf("%s expects 1 immediate argument", name)}
}
- return ops.errorf("%s expects %d immediate arguments", spec.Name, expected)
+ return &sourceError{line, col, fmt.Errorf("%s expects %d immediate arguments", name, expected)}
+ }
+ return nil
+}
+
+// Basic assembly. Any extra bytes of opcode are encoded as byte immediates.
+func asmDefault(ops *OpStream, spec *OpSpec, mnemonic token, args []token) *sourceError {
+ if err := ops.checkArgCount(spec.Name, mnemonic, args, len(spec.OpDetails.Immediates)); err != nil {
+ return err
}
ops.pending.WriteByte(spec.Opcode)
for i, imm := range spec.OpDetails.Immediates {
@@ -1077,9 +1118,9 @@ func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
switch imm.kind {
case immByte:
if imm.Group != nil {
- fs, ok := imm.Group.SpecByName(args[i])
+ fs, ok := imm.Group.SpecByName(args[i].str)
if !ok {
- _, err := byteImm(args[i], "")
+ _, err := byteImm(args[i].str, "")
if err == nil {
// User supplied a uint, so we see if any of the other immediates take uints
for j, otherImm := range spec.OpDetails.Immediates {
@@ -1092,43 +1133,44 @@ func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
if isPseudoName {
errMsg += " with " + joinIntsOnOr("immediate", len(args))
}
- return ops.errorf("%s can only use %#v as immediate %s", errMsg, args[i], strings.Join(correctImmediates, " or "))
+ return args[i].errorf("%s can only use %#v as immediate %s",
+ errMsg, args[i].str, strings.Join(correctImmediates, " or "))
}
}
if isPseudoName {
for numImms, ps := range pseudos {
for _, psImm := range ps.OpDetails.Immediates {
if psImm.kind == immByte && psImm.Group != nil {
- if _, ok := psImm.Group.SpecByName(args[i]); ok {
+ if _, ok := psImm.Group.SpecByName(args[i].str); ok {
numImmediatesWithField = append(numImmediatesWithField, numImms)
}
}
}
}
if len(numImmediatesWithField) > 0 {
- return ops.errorf("%#v field of %s can only be used with %s", args[i], spec.Name, joinIntsOnOr("immediate", numImmediatesWithField...))
+ return args[i].errorf("%#v field of %s can only be used with %s", args[i].str, spec.Name, joinIntsOnOr("immediate", numImmediatesWithField...))
}
}
- return ops.errorf("%s unknown field: %#v", spec.Name, args[i])
+ return args[i].errorf("%s unknown field: %#v", spec.Name, args[i].str)
}
// refine the typestack now, so it is maintained even if there's a version error
if fs.Type().Typed() {
ops.returns(spec, fs.Type())
}
if fs.Version() > ops.Version {
- return ops.errorf("%s %s field was introduced in v%d. Missed #pragma version?",
- spec.Name, args[i], fs.Version())
+ return args[i].errorf("%s %s field was introduced in v%d. Missed #pragma version?",
+ spec.Name, args[i].str, fs.Version())
}
ops.pending.WriteByte(fs.Field())
} else {
// simple immediate that must be a number from 0-255
- val, err := byteImm(args[i], imm.Name)
+ val, err := byteImm(args[i].str, imm.Name)
if err != nil {
if strings.Contains(err.Error(), "unable to parse") {
// Perhaps the field works in a different order
for j, otherImm := range spec.OpDetails.Immediates {
if otherImm.kind == immByte && otherImm.Group != nil {
- if _, match := otherImm.Group.SpecByName(args[i]); match {
+ if _, match := otherImm.Group.SpecByName(args[i].str); match {
correctImmediates = append(correctImmediates, strconv.Itoa(j+1))
}
}
@@ -1138,37 +1180,47 @@ func asmDefault(ops *OpStream, spec *OpSpec, args []string) error {
if isPseudoName {
errMsg += " with " + joinIntsOnOr("immediate", len(args))
}
- return ops.errorf("%s can only use %#v as immediate %s", errMsg, args[i], strings.Join(correctImmediates, " or "))
+ return args[i].errorf("%s can only use %#v as immediate %s", errMsg, args[i].str, strings.Join(correctImmediates, " or "))
}
}
- return ops.errorf("%s %w", spec.Name, err)
+ return args[i].errorf("%s %w", spec.Name, err)
}
ops.pending.WriteByte(val)
}
case immInt8:
- val, err := int8Imm(args[i], imm.Name)
+ val, err := int8Imm(args[i].str, imm.Name)
if err != nil {
- return ops.errorf("%s %w", spec.Name, err)
+ return args[i].errorf("%s %w", spec.Name, err)
}
ops.pending.WriteByte(val)
default:
- return ops.errorf("unable to assemble immKind %d", imm.kind)
+ return args[i].errorf("unable to assemble immKind %d", imm.kind)
}
}
return nil
}
-// getImm interprets the arg at index argIndex as an immediate
-func getImm(args []string, argIndex int) (int, bool) {
+// getImm interprets the arg at index argIndex as an immediate that must be
+// between -128 and 127 (if signed=true) or between 0 and 255 (if signed=false)
+func getImm(args []token, argIndex int, signed bool) (int, bool) {
if len(args) <= argIndex {
return 0, false
}
// We want to parse anything from -128 up to 255. So allow 9 bits.
// Normal assembly checking will catch signed as byte, vice versa
- n, err := strconv.ParseInt(args[argIndex], 0, 9)
+ n, err := strconv.ParseInt(args[argIndex].str, 0, 9)
if err != nil {
return 0, false
}
+ if signed {
+ if n < -128 || n > 127 {
+ return 0, false
+ }
+ } else {
+ if n < 0 || n > 255 {
+ return 0, false
+ }
+ }
return int(n), true
}
@@ -1180,7 +1232,7 @@ func anyTypes(n int) StackTypes {
return as
}
-func typeSwap(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+func typeSwap(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
swapped := StackTypes{StackAny, StackAny}
top := len(pgm.stack) - 1
if top >= 0 {
@@ -1192,8 +1244,8 @@ func typeSwap(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, err
return nil, swapped, nil
}
-func typeDig(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
- n, ok := getImm(args, 0)
+func typeDig(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0, false)
if !ok {
return nil, nil, nil
}
@@ -1209,8 +1261,8 @@ func typeDig(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, erro
return anyTypes(depth), returns, nil
}
-func typeBury(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
- n, ok := getImm(args, 0)
+func typeBury(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0, false)
if !ok {
return nil, nil, nil
}
@@ -1226,7 +1278,7 @@ func typeBury(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, err
idx := top - n
if idx < 0 {
- if pgm.bottom == StackNone {
+ if pgm.bottom.AVMType == avmNone {
// By demanding n+1 elements, we'll trigger an error
return anyTypes(n + 1), nil, nil
}
@@ -1241,8 +1293,8 @@ func typeBury(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, err
return pgm.stack[idx:], returns, nil
}
-func typeFrameDig(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
- n, ok := getImm(args, 0)
+func typeFrameDig(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0, true)
if !ok {
return nil, nil, nil
}
@@ -1262,8 +1314,8 @@ func typeFrameDig(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes,
return nil, StackTypes{pgm.stack[idx]}, nil
}
-func typeFrameBury(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
- n, ok := getImm(args, 0)
+func typeFrameBury(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0, true)
if !ok {
return nil, nil, nil
}
@@ -1303,16 +1355,18 @@ func typeFrameBury(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes
return pgm.stack[idx:], returns, nil
}
-func typeEquals(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+func typeEquals(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
top := len(pgm.stack) - 1
if top >= 0 {
- //Require arg0 and arg1 to have same type
- return StackTypes{pgm.stack[top], pgm.stack[top]}, nil, nil
+ // Require arg0 and arg1 to have same avm type
+ // but the bounds shouldn't matter
+ widened := pgm.stack[top].widened()
+ return StackTypes{widened, widened}, nil, nil
}
return nil, nil, nil
}
-func typeDup(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+func typeDup(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
top := len(pgm.stack) - 1
if top >= 0 {
return nil, StackTypes{pgm.stack[top], pgm.stack[top]}, nil
@@ -1320,7 +1374,7 @@ func typeDup(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, erro
return nil, nil, nil
}
-func typeDupTwo(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+func typeDupTwo(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
topTwo := StackTypes{StackAny, StackAny}
top := len(pgm.stack) - 1
if top >= 0 {
@@ -1332,17 +1386,15 @@ func typeDupTwo(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, e
return nil, append(topTwo, topTwo...), nil
}
-func typeSelect(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+func typeSelect(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
top := len(pgm.stack) - 1
if top >= 2 {
- if pgm.stack[top-1] == pgm.stack[top-2] {
- return nil, StackTypes{pgm.stack[top-1]}, nil
- }
+ return nil, StackTypes{pgm.stack[top-1].union(pgm.stack[top-2])}, nil
}
return nil, nil, nil
}
-func typeSetBit(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+func typeSetBit(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
top := len(pgm.stack) - 1
if top >= 2 {
return nil, StackTypes{pgm.stack[top-2]}, nil
@@ -1350,8 +1402,8 @@ func typeSetBit(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, e
return nil, nil, nil
}
-func typeCover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
- n, ok := getImm(args, 0)
+func typeCover(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0, false)
if !ok {
return nil, nil, nil
}
@@ -1372,8 +1424,8 @@ func typeCover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, er
return anyTypes(depth), returns, nil
}
-func typeUncover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
- n, ok := getImm(args, 0)
+func typeUncover(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0, false)
if !ok {
return nil, nil, nil
}
@@ -1390,19 +1442,25 @@ func typeUncover(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes,
return anyTypes(depth), returns, nil
}
-func typeTxField(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+func typeByteMath(resultSize uint64) refineFunc {
+ return func(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ return nil, StackTypes{NewStackType(avmBytes, bound(0, resultSize))}, nil
+ }
+}
+
+func typeTxField(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
if len(args) != 1 {
return nil, nil, nil
}
- fs, ok := txnFieldSpecByName[args[0]]
+ fs, ok := txnFieldSpecByName[args[0].str]
if !ok {
return nil, nil, nil
}
return StackTypes{fs.ftype}, nil, nil
}
-func typeStore(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
- scratchIndex, ok := getImm(args, 0)
+func typeStore(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ scratchIndex, ok := getImm(args, 0, false)
if !ok {
return nil, nil, nil
}
@@ -1413,36 +1471,45 @@ func typeStore(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, er
return nil, nil, nil
}
-func typeStores(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+func typeStores(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
top := len(pgm.stack) - 1
if top < 0 {
return nil, nil, nil
}
- for i := range pgm.scratchSpace {
- // We can't know what slot stacktop is being stored in, but we can at least keep the slots that are the same type as stacktop
- if pgm.scratchSpace[i] != pgm.stack[top] {
- pgm.scratchSpace[i] = StackAny
+
+ // If the index of the scratch slot is a const
+ // we can modify only that scratch slots type
+ if top >= 1 {
+ if idx, isConst := pgm.stack[top-1].constant(); isConst {
+ pgm.scratchSpace[idx] = pgm.stack[top]
+ return nil, nil, nil
}
}
+
+ for i := range pgm.scratchSpace {
+ // We can't know what slot stacktop is being stored in
+ // so we adjust the bounds and type of each slot as if the stacktop type were stored there.
+ pgm.scratchSpace[i] = pgm.scratchSpace[i].union(pgm.stack[top])
+ }
return nil, nil, nil
}
-func typeLoad(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
- scratchIndex, ok := getImm(args, 0)
+func typeLoad(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ scratchIndex, ok := getImm(args, 0, false)
if !ok {
return nil, nil, nil
}
return nil, StackTypes{pgm.scratchSpace[scratchIndex]}, nil
}
-func typeProto(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
- a, aok := getImm(args, 0)
- _, rok := getImm(args, 1)
+func typeProto(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ a, aok := getImm(args, 0, false)
+ _, rok := getImm(args, 1, false)
if !aok || !rok {
return nil, nil, nil
}
- if len(pgm.stack) != 0 || pgm.bottom != StackAny {
+ if len(pgm.stack) != 0 || pgm.bottom.AVMType != avmAny {
return nil, nil, fmt.Errorf("proto must be unreachable from previous PC")
}
pgm.stack = anyTypes(a)
@@ -1450,7 +1517,16 @@ func typeProto(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, er
return nil, nil, nil
}
-func typeLoads(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+func typeLoads(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ top := len(pgm.stack) - 1
+ if top < 0 {
+ return nil, nil, nil
+ }
+
+ if val, isConst := pgm.stack[top].constant(); isConst {
+ return nil, StackTypes{pgm.scratchSpace[val]}, nil
+ }
+
scratchType := pgm.scratchSpace[0]
for _, item := range pgm.scratchSpace {
// If all the scratch slots are one type, then we can say we are loading that type
@@ -1461,16 +1537,16 @@ func typeLoads(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, er
return nil, StackTypes{scratchType}, nil
}
-func typePopN(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
- n, ok := getImm(args, 0)
+func typePopN(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0, false)
if !ok {
return nil, nil, nil
}
return anyTypes(n), nil, nil
}
-func typeDupN(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
- n, ok := getImm(args, 0)
+func typeDupN(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ n, ok := getImm(args, 0, false)
if !ok {
return nil, nil, nil
}
@@ -1488,7 +1564,7 @@ func typeDupN(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, err
return nil, copies, nil
}
-func typePushBytess(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+func typePushBytess(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
types := make(StackTypes, len(args))
for i := range types {
types[i] = StackBytes
@@ -1497,7 +1573,7 @@ func typePushBytess(pgm *ProgramKnowledge, args []string) (StackTypes, StackType
return nil, types, nil
}
-func typePushInts(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes, error) {
+func typePushInts(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
types := make(StackTypes, len(args))
for i := range types {
types[i] = StackUint64
@@ -1506,6 +1582,33 @@ func typePushInts(pgm *ProgramKnowledge, args []string) (StackTypes, StackTypes,
return nil, types, nil
}
+func typePushInt(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ types := make(StackTypes, len(args))
+ for i := range types {
+ val, err := strconv.ParseUint(args[i].str, 10, 64)
+ if err != nil {
+ types[i] = StackUint64
+ } else {
+ types[i] = NewStackType(avmUint64, bound(val, val))
+ }
+ }
+ return nil, types, nil
+}
+
+func typeBzero(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ // bzero should only allow its uint64 input to be at most maxStringSize, the longest byte-array it can produce
+ return StackTypes{StackUint64.narrowed(bound(0, maxStringSize))}, StackTypes{StackBytes}, nil
+}
+
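// Illustrative sketch, not part of the diff: the new bound on bzero's input.
// Assumes testProg/exp from assembler_test.go and that maxStringSize is 4096.
func TestBzeroBoundSketch(t *testing.T) {
	// A constant no larger than maxStringSize type-checks...
	testProg(t, "int 4096; bzero; len", AssemblerMaxVersion)
	// ...but a provably over-large constant is rejected during assembly.
	testProg(t, "int 5000; bzero; len", AssemblerMaxVersion,
		exp(1, "bzero arg 0 wanted..."))
}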
+func typeByte(pgm *ProgramKnowledge, args []token) (StackTypes, StackTypes, error) {
+ if len(args) == 0 {
+ return nil, StackTypes{StackBytes}, nil
+ }
+ val, _, _ := parseBinaryArgs(args)
+ l := uint64(len(val))
+ return nil, StackTypes{NewStackType(avmBytes, static(l), fmt.Sprintf("[%d]byte", l))}, nil
+}
+
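// Illustrative sketch, not part of the diff: typeByte records the literal's
// exact width, which sharpens later mismatch messages (compare the "[5]byte"
// expectation in the assembler_test.go changes below). Assumes testProg/exp
// from assembler_test.go.
func TestByteWidthSketch(t *testing.T) {
	testProg(t, "byte 0x1122334455; int 1; +", AssemblerMaxVersion,
		exp(1, "+ arg 0 wanted type uint64 got [5]byte"))
}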
func joinIntsOnOr(singularTerminator string, list ...int) string {
if len(list) == 1 {
switch list[0] {
@@ -1529,55 +1632,81 @@ func joinIntsOnOr(singularTerminator string, list ...int) string {
return errMsg + singularTerminator + "s"
}
-func pseudoImmediatesError(ops *OpStream, name string, specs map[int]OpSpec) {
+func pseudoImmediatesError(ops *OpStream, mnemonic token, specs map[int]OpSpec) {
immediateCounts := make([]int, len(specs))
i := 0
for numImms := range specs {
immediateCounts[i] = numImms
i++
}
- ops.error(name + " expects " + joinIntsOnOr("immediate argument", immediateCounts...))
+ ops.record(mnemonic.errorf("%s expects %s", mnemonic.str, joinIntsOnOr("immediate argument", immediateCounts...)))
}
// getSpec finds the OpSpec we need during assembly based on its name, our current version, and the immediates passed in
// Note getSpec handles both normal OpSpecs and those supplied by versionedPseudoOps
// The returned string is the spec's name, annotated if it was a pseudoOp with no immediates to help disambiguate typetracking errors
-func getSpec(ops *OpStream, name string, args []string) (OpSpec, string, bool) {
+func getSpec(ops *OpStream, mnemonic token, argCount int) (OpSpec, string, bool) {
+ name := mnemonic.str
pseudoSpecs, ok := ops.versionedPseudoOps[name]
if ok {
- pseudo, ok := pseudoSpecs[len(args)]
+ pseudo, ok := pseudoSpecs[argCount]
if !ok {
// Could be that pseudoOp wants to handle immediates itself so check -1 key
pseudo, ok = pseudoSpecs[anyImmediates]
if !ok {
// Number of immediates supplied did not match any of the pseudoOps of the given name, so we try to construct a mock spec that can be used to track types
- pseudoImmediatesError(ops, name, pseudoSpecs)
+ pseudoImmediatesError(ops, mnemonic, pseudoSpecs)
proto, version, ok := mergeProtos(pseudoSpecs)
if !ok {
return OpSpec{}, "", false
}
- pseudo = OpSpec{Name: name, Proto: proto, Version: version, OpDetails: OpDetails{asm: func(*OpStream, *OpSpec, []string) error { return nil }}}
+ pseudo = OpSpec{Name: name, Proto: proto, Version: version, OpDetails: OpDetails{
+ asm: func(*OpStream, *OpSpec, token, []token) *sourceError { return nil },
+ }}
}
}
pseudo.Name = name
if pseudo.Version > ops.Version {
- ops.errorf("%s opcode with %s was introduced in v%d", pseudo.Name, joinIntsOnOr("immediate", len(args)), pseudo.Version)
+ ops.record(mnemonic.errorf("%s opcode with %s was introduced in v%d",
+ pseudo.Name, joinIntsOnOr("immediate", argCount), pseudo.Version))
}
- if len(args) == 0 {
+ if argCount == 0 {
return pseudo, pseudo.Name + " without immediates", true
}
return pseudo, pseudo.Name, true
}
spec, ok := OpsByName[ops.Version][name]
if !ok {
- spec, ok = OpsByName[AssemblerMaxVersion][name]
+ var err error
+ spec, err = unknownOpcodeComplaint(name, ops.Version)
+ // unknownOpcodeComplaint's job is to return a nice error, so err != nil
+ ops.record(mnemonic.error(err))
+ }
+ return spec, spec.Name, ok
+}
+
+// unknownOpcodeComplaint returns the best error it can for a missing opcode,
+// plus a "standin" OpSpec, if possible.
+func unknownOpcodeComplaint(name string, v uint64) (OpSpec, error) {
+ first, last := -1, -1
+ var standin OpSpec
+ for i := 1; i < len(OpsByName); i++ {
+ spec, ok := OpsByName[i][name]
if ok {
- ops.errorf("%s opcode was introduced in v%d", name, spec.Version)
- } else {
- ops.errorf("unknown opcode: %s", name)
+ standin = spec
+ if first == -1 {
+ first = i
+ }
+ last = i
}
}
- return spec, spec.Name, ok
+ if first > int(v) {
+ return standin, fmt.Errorf("%s opcode was introduced in v%d", name, first)
+ }
+ if last != -1 && last < int(v) {
+ return standin, fmt.Errorf("%s opcode was removed in v%d", name, last+1)
+ }
+ return OpSpec{}, fmt.Errorf("unknown opcode: %s", name)
}
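// Illustrative sketch, not part of the diff: the complaints the helper can
// produce for a name that exists in another version versus one that never
// existed. The box_create example assumes box opcodes arrived in v8.
func TestUnknownOpcodeSketch(t *testing.T) {
	testProg(t, "box_create", 7, exp(1, "box_create opcode was introduced in v8"))
	testProg(t, "frobnicate", AssemblerMaxVersion, exp(1, "unknown opcode: frobnicate"))
}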
// pseudoOps allows us to provide convenient ops that mirror existing ops without taking up another opcode. Using "txn" in version 2 and on, for example, determines whether to actually assemble txn or to use txna instead based on the number of immediates.
@@ -1586,8 +1715,8 @@ func getSpec(ops *OpStream, name string, args []string) (OpSpec, string, bool) {
const anyImmediates = -1
var pseudoOps = map[string]map[int]OpSpec{
- "int": {anyImmediates: OpSpec{Name: "int", Proto: proto(":i"), OpDetails: assembler(asmInt)}},
- "byte": {anyImmediates: OpSpec{Name: "byte", Proto: proto(":b"), OpDetails: assembler(asmByte)}},
+ "int": {anyImmediates: OpSpec{Name: "int", Proto: proto(":i"), OpDetails: assembler(asmInt).typed(typePushInt)}},
+ "byte": {anyImmediates: OpSpec{Name: "byte", Proto: proto(":b"), OpDetails: assembler(asmByte).typed(typeByte)}},
// parse basics.Address, actually just another []byte constant
"addr": {anyImmediates: OpSpec{Name: "addr", Proto: proto(":b"), OpDetails: assembler(asmAddr)}},
// take a signature, hash it, and take first 4 bytes, actually just another []byte constant
@@ -1680,17 +1809,25 @@ func prepareVersionedPseudoTable(version uint64) map[string]map[int]OpSpec {
return m
}
-type lineError struct {
- Line int
- Err error
+type sourceError struct {
+ Line int
+ Column int
+ Err error
+}
+
+func (se sourceError) Error() string {
+ if se.Column != 0 {
+ return fmt.Sprintf("%d:%d: %s", se.Line, se.Column, se.Err.Error())
+ }
+ return fmt.Sprintf("%d: %s", se.Line, se.Err.Error())
}
-func (le lineError) Error() string {
- return fmt.Sprintf("%d: %s", le.Line, le.Err.Error())
+func (se sourceError) Unwrap() error {
+ return se.Err
}
-func (le lineError) Unwrap() error {
- return le.Err
+func errorLinef(line int, format string, a ...interface{}) *sourceError {
+ return &sourceError{line, 0, fmt.Errorf(format, a...)}
}
func typecheck(expected, got StackType) bool {
@@ -1705,16 +1842,36 @@ func typecheck(expected, got StackType) bool {
return expected == got
}
+type token struct {
+ str string
+ col int
+ line int
+}
+
+func (t token) error(err error) *sourceError {
+ return &sourceError{t.line, t.col, err}
+}
+
+func (t token) errorf(format string, args ...interface{}) *sourceError {
+ return t.error(fmt.Errorf(format, args...))
+}
+
+func (t token) errorAfterf(format string, args ...interface{}) *sourceError {
+ return &sourceError{t.line, t.col + len(t.str), fmt.Errorf(format, args...)}
+}
+
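// Minimal sketch, not part of the diff: how token-anchored errors render.
// Columns are 0-based byte offsets, and a zero column falls back to the old
// line-only format. The values here are hypothetical.
func tokenErrorRenderingSketch() {
	tok := token{str: "256", col: 14, line: 2}
	fmt.Println(tok.errorf("txna i beyond 255: %s", tok.str)) // 2:14: txna i beyond 255: 256
	fmt.Println(tok.errorAfterf("expected more"))             // 2:17: expected more
}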
// newline not included since handled in scanner
var tokenSeparators = [256]bool{'\t': true, ' ': true, ';': true}
-func tokensFromLine(line string) []string {
- var tokens []string
+// tokensFromLine splits a line into tokens, ignoring comments. Tokens are
+// annotated with the provided lineno and the column where they are found.
+func tokensFromLine(sourceLine string, lineno int) []token {
+ var tokens []token
i := 0
- for i < len(line) && tokenSeparators[line[i]] {
- if line[i] == ';' {
- tokens = append(tokens, ";")
+ for i < len(sourceLine) && tokenSeparators[sourceLine[i]] {
+ if sourceLine[i] == ';' {
+ tokens = append(tokens, token{";", i, lineno})
}
i++
}
@@ -1722,28 +1879,28 @@ func tokensFromLine(line string) []string {
start := i
inString := false // tracked to allow spaces and comments inside
inBase64 := false // tracked to allow '//' inside
- for i < len(line) {
- if !tokenSeparators[line[i]] { // if not space
- switch line[i] {
+ for i < len(sourceLine) {
+ if !tokenSeparators[sourceLine[i]] { // if not space
+ switch sourceLine[i] {
case '"': // is a string literal?
if !inString {
- if i == 0 || i > 0 && tokenSeparators[line[i-1]] {
+ if i == 0 || i > 0 && tokenSeparators[sourceLine[i-1]] {
inString = true
}
} else {
- if line[i-1] != '\\' { // if not escape symbol
+ if sourceLine[i-1] != '\\' { // if not escape symbol
inString = false
}
}
case '/': // is a comment?
- if i < len(line)-1 && line[i+1] == '/' && !inBase64 && !inString {
+ if i < len(sourceLine)-1 && sourceLine[i+1] == '/' && !inBase64 && !inString {
if start != i { // if a comment without whitespace
- tokens = append(tokens, line[start:i])
+ tokens = append(tokens, token{sourceLine[start:i], start, lineno})
}
return tokens
}
case '(': // is base64( seq?
- prefix := line[start:i]
+ prefix := sourceLine[start:i]
if prefix == "base64" || prefix == "b64" {
inBase64 = true
}
@@ -1760,14 +1917,14 @@ func tokensFromLine(line string) []string {
// we've hit a space, end last token unless inString
if !inString {
- token := line[start:i]
- tokens = append(tokens, token)
- if line[i] == ';' {
- tokens = append(tokens, ";")
+ s := sourceLine[start:i]
+ tokens = append(tokens, token{s, start, lineno})
+ if sourceLine[i] == ';' {
+ tokens = append(tokens, token{";", i, lineno})
}
if inBase64 {
inBase64 = false
- } else if token == "base64" || token == "b64" {
+ } else if s == "base64" || s == "b64" {
inBase64 = true
}
}
@@ -1775,9 +1932,9 @@ func tokensFromLine(line string) []string {
// gobble up consecutive whitespace (but notice semis)
if !inString {
- for i < len(line) && tokenSeparators[line[i]] {
- if line[i] == ';' {
- tokens = append(tokens, ";")
+ for i < len(sourceLine) && tokenSeparators[sourceLine[i]] {
+ if sourceLine[i] == ';' {
+ tokens = append(tokens, token{";", i, lineno})
}
i++
}
@@ -1786,8 +1943,8 @@ func tokensFromLine(line string) []string {
}
// add rest of the string if any
- if start < len(line) {
- tokens = append(tokens, line[start:i])
+ if start < len(sourceLine) {
+ tokens = append(tokens, token{sourceLine[start:i], start, lineno})
}
return tokens
@@ -1800,22 +1957,32 @@ func (ops *OpStream) trace(format string, args ...interface{}) {
fmt.Fprintf(ops.Trace, format, args...)
}
-func (ops *OpStream) typeErrorf(format string, args ...interface{}) {
+func (ops *OpStream) typeErrorf(opcode token, format string, args ...interface{}) {
if ops.typeTracking {
- ops.errorf(format, args...)
+ ops.record(opcode.errorf(format, args...))
+ }
+}
+
+func reJoin(instruction string, args []token) string {
+ var buf bytes.Buffer
+ buf.WriteString(instruction)
+ for _, t := range args {
+ buf.WriteString(" ")
+ buf.WriteString(t.str)
}
+ return buf.String()
}
// trackStack checks that the typeStack has `args` on it, then pushes `returns` to it.
-func (ops *OpStream) trackStack(args StackTypes, returns StackTypes, instruction []string) {
+func (ops *OpStream) trackStack(args StackTypes, returns StackTypes, instruction string, tokens []token) {
// If in deadcode, allow anything. Maybe it's some sort of onchain data.
if ops.known.deadcode {
return
}
argcount := len(args)
- if argcount > len(ops.known.stack) && ops.known.bottom == StackNone {
- ops.typeErrorf("%s expects %d stack arguments but stack height is %d",
- strings.Join(instruction, " "), argcount, len(ops.known.stack))
+ if argcount > len(ops.known.stack) && ops.known.bottom.AVMType == avmNone {
+ ops.typeErrorf(tokens[0], "%s expects %d stack arguments but stack height is %d",
+ reJoin(instruction, tokens[1:]), argcount, len(ops.known.stack))
} else {
firstPop := true
for i := argcount - 1; i >= 0; i-- {
@@ -1827,9 +1994,9 @@ func (ops *OpStream) trackStack(args StackTypes, returns StackTypes, instruction
} else {
ops.trace(", %s", argType)
}
- if !typecheck(argType, stype) {
- ops.typeErrorf("%s arg %d wanted type %s got %s",
- strings.Join(instruction, " "), i, argType, stype)
+ if !stype.overlaps(argType) {
+ ops.typeErrorf(tokens[0], "%s arg %d wanted type %s got %s",
+ reJoin(instruction, tokens[1:]), i, argType, stype)
}
}
if !firstPop {
@@ -1850,49 +2017,55 @@ func (ops *OpStream) trackStack(args StackTypes, returns StackTypes, instruction
}
// nextStatement breaks tokens into two slices at the first semicolon and expands macros along the way.
-func nextStatement(ops *OpStream, tokens []string) (current, rest []string) {
+func nextStatement(ops *OpStream, tokens []token) (current, rest []token) {
for i := 0; i < len(tokens); i++ {
token := tokens[i]
- replacement, ok := ops.macros[token]
+ replacement, ok := ops.macros[token.str]
if ok {
- tokens = append(tokens[0:i], append(replacement, tokens[i+1:]...)...)
+ tokens = append(tokens[0:i], append(replacement[1:], tokens[i+1:]...)...)
// backup to handle potential re-expansion of the first token in the expansion
i--
continue
}
- if token == ";" {
+ if token.str == ";" {
return tokens[:i], tokens[i+1:]
}
}
return tokens, nil
}
-type directiveFunc func(*OpStream, []string) error
+type directiveFunc func(*OpStream, []token) *sourceError
var directives = map[string]directiveFunc{"pragma": pragma, "define": define}
// assemble reads text from an input and accumulates the program
func (ops *OpStream) assemble(text string) error {
if ops.Version > LogicVersion && ops.Version != assemblerNoVersion {
- return ops.errorf("Can not assemble version %d", ops.Version)
+ err := fmt.Errorf("Can not assemble version %d", ops.Version)
+ ops.record(&sourceError{0, 0, err})
+ return err
}
if strings.TrimSpace(text) == "" {
- return ops.errorf("Cannot assemble empty program text")
+ err := errors.New("Cannot assemble empty program text")
+ ops.record(&sourceError{0, 0, err})
+ return err
}
fin := strings.NewReader(text)
scanner := bufio.NewScanner(fin)
for scanner.Scan() {
ops.sourceLine++
line := scanner.Text()
- tokens := tokensFromLine(line)
+ tokens := tokensFromLine(line, ops.sourceLine)
if len(tokens) > 0 {
- if first := tokens[0]; first[0] == '#' {
- directive := first[1:]
+ if first := tokens[0]; first.str[0] == '#' {
+ directive := first.str[1:]
if dFunc, ok := directives[directive]; ok {
- _ = dFunc(ops, tokens)
- ops.trace("%3d: %s line\n", ops.sourceLine, first)
+ if err := dFunc(ops, tokens); err != nil {
+ ops.record(err)
+ }
+ ops.trace("%3d: %s line\n", first.line, first.str)
} else {
- ops.errorf("Unknown directive: %s", directive)
+ ops.record(first.errorf("unknown directive: %s", directive))
}
continue
}
@@ -1909,22 +2082,22 @@ func (ops *OpStream) assemble(text string) error {
if ops.versionedPseudoOps == nil {
ops.versionedPseudoOps = prepareVersionedPseudoTable(ops.Version)
}
- opstring := current[0]
+ opstring := current[0].str
if opstring[len(opstring)-1] == ':' {
labelName := opstring[:len(opstring)-1]
if _, ok := ops.macros[labelName]; ok {
- ops.errorf("Cannot create label with same name as macro: %s", labelName)
+ ops.record(current[0].errorf("Cannot create label with same name as macro: %s", labelName))
} else {
- ops.createLabel(opstring[:len(opstring)-1])
+ ops.createLabel(current[0])
}
current = current[1:]
if len(current) == 0 {
ops.trace("%3d: label only\n", ops.sourceLine)
continue
}
- opstring = current[0]
+ opstring = current[0].str
}
- spec, expandedName, ok := getSpec(ops, opstring, current[1:])
+ spec, expandedName, ok := getSpec(ops, current[0], len(current)-1)
if ok {
ops.trace("%3d: %s\t", ops.sourceLine, opstring)
ops.recordSourceLine()
@@ -1935,7 +2108,7 @@ func (ops *OpStream) assemble(text string) error {
if spec.refine != nil {
nargs, nreturns, err := spec.refine(&ops.known, current[1:])
if err != nil {
- ops.typeErrorf("%w", err)
+ ops.typeErrorf(current[0], "%w", err)
}
if nargs != nil {
args = nargs
@@ -1944,9 +2117,10 @@ func (ops *OpStream) assemble(text string) error {
returns = nreturns
}
}
- ops.trackStack(args, returns, append([]string{expandedName}, current[1:]...))
- spec.asm(ops, &spec, current[1:]) //nolint:errcheck // ignore error and continue, to collect more errors
-
+ ops.trackStack(args, returns, expandedName, current)
+ if err := spec.asm(ops, &spec, current[0], current[1:]); err != nil {
+ ops.record(err)
+ }
if spec.deadens() { // An unconditional branch deadens the following code
ops.known.deaden()
}
@@ -1964,16 +2138,7 @@ func (ops *OpStream) assemble(text string) error {
if errors.Is(err, bufio.ErrTooLong) {
err = errors.New("line too long")
}
- ops.error(err)
- }
-
- // backward compatibility: do not allow jumps past last instruction in v1
- if ops.Version <= 1 {
- for label, dest := range ops.labels {
- if dest == ops.pending.Len() {
- ops.errorf("label %#v is too far away", label)
- }
- }
+ ops.record(&sourceError{ops.sourceLine, 0, err})
}
if ops.Version >= optimizeConstantsEnabledVersion {
@@ -1986,7 +2151,7 @@ func (ops *OpStream) assemble(text string) error {
if ops.Errors != nil {
l := len(ops.Errors)
if l == 1 {
- return errors.New("1 error")
+ return fmt.Errorf("1 error: %w", ops.Errors[0])
}
return fmt.Errorf("%d errors", l)
}
@@ -1994,21 +2159,23 @@ func (ops *OpStream) assemble(text string) error {
return nil
}
-func (ops *OpStream) cycle(macro string, previous ...string) bool {
- replacement, ok := ops.macros[macro]
+// cycle returns a slice of strings that constitute a cycle, if one is
+// found. That is, the first token expands to the second, and so on, with the
+// first and last string being the same.
+func (ops *OpStream) cycle(macro token, previous []string) []string {
+ replacement, ok := ops.macros[macro.str]
if !ok {
- return false
+ return nil
}
- if len(previous) > 0 && macro == previous[0] {
- ops.errorf("Macro cycle discovered: %s", strings.Join(append(previous, macro), " -> "))
- return true
+ if len(previous) > 0 && macro.str == previous[0] {
+ return append(previous, macro.str)
}
- for _, token := range replacement {
- if ops.cycle(token, append(previous, macro)...) {
- return true
+ for _, repl := range replacement[1:] {
+ if found := ops.cycle(repl, append(previous, macro.str)); found != nil {
+ return found
}
}
- return false
+ return nil
}
// recheckMacroNames goes through previously defined macros and ensures they
@@ -2019,8 +2186,8 @@ func (ops *OpStream) recheckMacroNames() error {
for macroName := range ops.macros {
err := checkMacroName(macroName, ops.Version, ops.labels)
if err != nil {
+ ops.record(ops.macros[macroName][0].error(err))
delete(ops.macros, macroName)
- ops.error(err)
errored = true
}
}
@@ -2084,54 +2251,58 @@ func checkMacroName(macroName string, version uint64, labels map[string]int) err
return nil
}
-func define(ops *OpStream, tokens []string) error {
- if tokens[0] != "#define" {
- return ops.errorf("invalid syntax: %s", tokens[0])
+func define(ops *OpStream, tokens []token) *sourceError {
+ if tokens[0].str != "#define" {
+ return tokens[0].errorf("invalid syntax: %s", tokens[0].str)
}
if len(tokens) < 3 {
- return ops.errorf("define directive requires a name and body")
+ return tokens[len(tokens)-1].errorf("define directive requires a name and body")
}
- name := tokens[1]
+ name := tokens[1].str
err := checkMacroName(name, ops.Version, ops.labels)
if err != nil {
- return ops.error(err)
+ return tokens[1].error(err)
}
saved, ok := ops.macros[name]
- ops.macros[name] = tokens[2:len(tokens):len(tokens)]
- if ops.cycle(tokens[1]) {
+ ops.macros[name] = tokens[1:len(tokens):len(tokens)] // include the name itself at the front
+ if found := ops.cycle(tokens[1], nil); found != nil {
if ok {
- ops.macros[tokens[1]] = saved
+ ops.macros[name] = saved // restore previous macro
} else {
- delete(ops.macros, tokens[1])
+ delete(ops.macros, name) // remove new macro that caused cycle
}
+ return tokens[1].errorf("macro expansion cycle discovered: %s", strings.Join(found, " -> "))
}
return nil
}
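// Illustrative sketch, not part of the diff: because macros now keep their
// defining token at the front, a cycle error can point at the macro name
// itself. Assumes testProg/exp from assembler_test.go; the column value is
// the 0-based offset of "y" after "#define ".
func TestMacroCycleSketch(t *testing.T) {
	testProg(t, "#define x y\n#define y x", AssemblerMaxVersion,
		exp(2, "macro expansion cycle discovered: y -> x -> y", 8))
}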
-func pragma(ops *OpStream, tokens []string) error {
- if tokens[0] != "#pragma" {
- return ops.errorf("invalid syntax: %s", tokens[0])
+func pragma(ops *OpStream, tokens []token) *sourceError {
+ if tokens[0].str != "#pragma" {
+ return tokens[0].errorf("invalid syntax: %s", tokens[0].str)
}
if len(tokens) < 2 {
- return ops.error("empty pragma")
+ return tokens[0].errorf("empty pragma")
}
- key := tokens[1]
+ key := tokens[1].str
switch key {
case "version":
if len(tokens) < 3 {
- return ops.error("no version value")
+ return tokens[1].errorf("no version value")
+ }
+ if len(tokens) > 3 {
+ return tokens[3].errorf("unexpected extra tokens:%s", reJoin("", tokens[3:]))
}
- value := tokens[2]
var ver uint64
if ops.pending.Len() > 0 {
- return ops.error("#pragma version is only allowed before instructions")
+ return tokens[0].errorf("#pragma version is only allowed before instructions")
}
+ value := tokens[2].str
ver, err := strconv.ParseUint(value, 0, 64)
if err != nil {
- return ops.errorf("bad #pragma version: %#v", value)
+ return tokens[2].errorf("bad #pragma version: %#v", value)
}
if ver > AssemblerMaxVersion {
- return ops.errorf("unsupported version: %d", ver)
+ return tokens[2].errorf("unsupported version: %d", ver)
}
// We initialize Version with assemblerNoVersion as a marker for
@@ -2139,21 +2310,27 @@ func pragma(ops *OpStream, tokens []string) error {
// version for v1.
if ops.Version == assemblerNoVersion {
ops.Version = ver
- return ops.recheckMacroNames()
+ if err := ops.recheckMacroNames(); err != nil {
+ return tokens[2].error(err)
+ }
+ return nil
}
if ops.Version != ver {
- return ops.errorf("version mismatch: assembling v%d with v%d assembler", ver, ops.Version)
+ return tokens[2].errorf("version mismatch: assembling v%d with v%d assembler", ver, ops.Version)
}
// ops.Version is already correct, or needed to be upped.
return nil
case "typetrack":
if len(tokens) < 3 {
- return ops.error("no typetrack value")
+ return tokens[1].errorf("no typetrack value")
+ }
+ if len(tokens) > 3 {
+ return tokens[3].errorf("unexpected extra tokens:%s", reJoin("", tokens[3:]))
}
- value := tokens[2]
+ value := tokens[2].str
on, err := strconv.ParseBool(value)
if err != nil {
- return ops.errorf("bad #pragma typetrack: %#v", value)
+ return tokens[2].errorf("bad #pragma typetrack: %#v", value)
}
prev := ops.typeTracking
if !prev && on {
@@ -2163,41 +2340,45 @@ func pragma(ops *OpStream, tokens []string) error {
return nil
default:
- return ops.errorf("unsupported pragma directive: %#v", key)
+ return tokens[0].errorf("unsupported pragma directive: %#v", key)
}
}
func (ops *OpStream) resolveLabels() {
- saved := ops.sourceLine
raw := ops.pending.Bytes()
reported := make(map[string]bool)
for _, lr := range ops.labelReferences {
- ops.sourceLine = lr.sourceLine // so errors get reported where the label was used
- dest, ok := ops.labels[lr.label]
+ dest, ok := ops.labels[lr.label.str]
if !ok {
- if !reported[lr.label] {
- ops.errorf("reference to undefined label %#v", lr.label)
+ if !reported[lr.label.str] {
+ ops.record(lr.label.errorf("reference to undefined label %#v", lr.label.str))
}
- reported[lr.label] = true
+ reported[lr.label.str] = true
continue
}
+ // backward compatibility: do not allow jumps past last instruction in v1
+ if ops.Version <= 1 {
+ if dest == ops.pending.Len() {
+ ops.record(lr.label.errorf("label %#v is too far away", lr.label.str))
+ }
+ }
+
// All branch targets are encoded as 2 offset bytes. The destination is relative to the end of the
// instruction they appear in, which is available in lr.offsetPosition
if ops.Version < backBranchEnabledVersion && dest < lr.offsetPosition {
- ops.errorf("label %#v is a back reference, back jump support was introduced in v4", lr.label)
+ ops.record(lr.label.errorf("label %#v is a back reference, back jump support was introduced in v4", lr.label.str))
continue
}
jump := dest - lr.offsetPosition
if jump > 0x7fff {
- ops.errorf("label %#v is too far away", lr.label)
+ ops.record(lr.label.errorf("label %#v is too far away", lr.label.str))
continue
}
raw[lr.position] = uint8(jump >> 8)
raw[lr.position+1] = uint8(jump & 0x0ff)
}
ops.pending = *bytes.NewBuffer(raw)
- ops.sourceLine = saved
}
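// Illustrative sketch, not part of the diff: the v1 "too far away" check now
// runs during label resolution, so it is reported at the referencing token
// rather than at the end of assembly. Assumes testProg/exp from
// assembler_test.go.
func TestV1JumpPastEndSketch(t *testing.T) {
	testProg(t, "int 1; bnz done; done:", 1,
		exp(1, `label "done" is too far away`))
}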
// AssemblerDefaultVersion what version of code do we emit by default
@@ -2347,14 +2528,14 @@ func (ops *OpStream) optimizeConstants(refs []constReference, constBlock []inter
}
}
if !found {
- err = ops.lineErrorf(ops.OffsetToLine[ref.getPosition()], "Value not found in constant block: %v", ref.getValue())
+ err = errorLinef(ops.OffsetToLine[ref.getPosition()], "value not found in constant block: %v", ref.getValue())
return
}
}
for _, f := range freqs {
if f.freq == 0 {
- err = ops.errorf("Member of constant block is not used: %v", f.value)
+ err = errorLinef(ops.sourceLine, "member of constant block is not used: %v", f.value)
return
}
}
@@ -2385,7 +2566,7 @@ func (ops *OpStream) optimizeConstants(refs []constReference, constBlock []inter
}
}
if newIndex == -1 {
- return nil, ops.lineErrorf(ops.OffsetToLine[ref.getPosition()], "Value not found in constant block: %v", ref.getValue())
+ return nil, errorLinef(ops.OffsetToLine[ref.getPosition()], "value not found in constant block: %v", ref.getValue())
}
newBytes := ref.makeNewReference(ops, singleton, newIndex)
@@ -2487,12 +2668,12 @@ func (ops *OpStream) prependCBlocks() []byte {
out := make([]byte, pbl+outl)
pl, err := prebytes.Read(out)
if pl != pbl || err != nil {
- ops.errorf("wat: %d prebytes, %d to buffer? err=%w", pbl, pl, err)
+ ops.record(&sourceError{ops.sourceLine, 0, fmt.Errorf("%d prebytes, %d to buffer? %w", pbl, pl, err)})
return nil
}
ol, err := ops.pending.Read(out[pl:])
if ol != outl || err != nil {
- ops.errorf("%d program bytes but %d to buffer. err=%w", outl, ol, err)
+ ops.record(&sourceError{ops.sourceLine, 0, fmt.Errorf("%d program bytes but %d to buffer. %w", outl, ol, err)})
return nil
}
@@ -2506,48 +2687,17 @@ func (ops *OpStream) prependCBlocks() []byte {
return out
}
-func (ops *OpStream) error(problem interface{}) error {
- return ops.lineError(ops.sourceLine, problem)
-}
-
-func (ops *OpStream) lineError(line int, problem interface{}) error {
- var err lineError
- switch p := problem.(type) {
- case string:
- err = lineError{Line: line, Err: errors.New(p)}
- case error:
- err = lineError{Line: line, Err: p}
- default:
- err = lineError{Line: line, Err: fmt.Errorf("%#v", p)}
- }
- ops.Errors = append(ops.Errors, err)
- return err
-}
-
-func (ops *OpStream) errorf(format string, a ...interface{}) error {
- return ops.error(fmt.Errorf(format, a...))
-}
-
-func (ops *OpStream) lineErrorf(line int, format string, a ...interface{}) error {
- return ops.lineError(line, fmt.Errorf(format, a...))
+// record puts an error onto a list for reporting later, so that the assembler
+// can keep going after an error and more than one TEAL error can be reported
+// in a single pass. Swallowing errors explicitly like this also lets us use
+// the errcheck linter properly.
+func (ops *OpStream) record(se *sourceError) {
+ ops.Errors = append(ops.Errors, *se)
}
-func (ops *OpStream) warn(problem interface{}) error {
- var le *lineError
- switch p := problem.(type) {
- case string:
- le = &lineError{Line: ops.sourceLine, Err: errors.New(p)}
- case error:
- le = &lineError{Line: ops.sourceLine, Err: p}
- default:
- le = &lineError{Line: ops.sourceLine, Err: fmt.Errorf("%#v", p)}
- }
- warning := fmt.Errorf("warning: %w", le)
+func (ops *OpStream) warn(t token, format string, a ...interface{}) {
+ warning := &sourceError{Line: t.line, Column: t.col, Err: fmt.Errorf(format, a...)}
ops.Warnings = append(ops.Warnings, warning)
- return warning
-}
-func (ops *OpStream) warnf(format string, a ...interface{}) error {
- return ops.warn(fmt.Errorf(format, a...))
}
// ReportMultipleErrors issues accumulated warnings and outputs errors to an io.Writer.
@@ -2650,7 +2800,7 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
out += " "
switch imm.kind {
case immByte, immInt8:
- if pc >= len(dis.program) {
+ if pc+1 > len(dis.program) {
return "", fmt.Errorf("program end while reading immediate %s for %s",
imm.Name, spec.Name)
}
@@ -2680,6 +2830,10 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
pc++
case immLabel:
+ // decodeBranchOffset assumes it has two bytes to work with
+ if pc+2 > len(dis.program) {
+ return "", fmt.Errorf("program end while reading label for %s", spec.Name)
+ }
offset := decodeBranchOffset(dis.program, pc)
target := offset + pc + 2
var label string
@@ -2744,9 +2898,9 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
}
pc = nextpc
case immLabels:
- targets, nextpc, err := parseSwitch(dis.program, pc)
+ targets, nextpc, err := parseLabels(dis.program, pc)
if err != nil {
- return "", err
+ return "", fmt.Errorf("%w for %s", err, spec.Name)
}
var labels []string
@@ -2873,10 +3027,18 @@ func checkByteImmArgs(cx *EvalContext) error {
return err
}
-func parseSwitch(program []byte, pos int) (targets []int, nextpc int, err error) {
+func parseLabels(program []byte, pos int) (targets []int, nextpc int, err error) {
+ if pos >= len(program) {
+ err = errors.New("could not decode label count")
+ return
+ }
numOffsets := int(program[pos])
pos++
end := pos + 2*numOffsets // end of op: offset is applied to this position
+ if end > len(program) {
+ err = errors.New("could not decode labels")
+ return
+ }
for i := 0; i < numOffsets; i++ {
offset := decodeBranchOffset(program, pos)
target := end + offset
@@ -2956,12 +3118,12 @@ func disassembleInstrumented(program []byte, labels map[int]string) (text string
ds.pcOffset = append(ds.pcOffset, PCOffset{dis.pc, out.Len()})
// Actually do the disassembly
- var line string
- line, err = disassemble(&dis, &op)
+ var instruction string
+ instruction, err = disassemble(&dis, &op)
if err != nil {
return
}
- out.WriteString(line)
+ out.WriteString(instruction)
out.WriteRune('\n')
dis.pc = dis.nextpc
}
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 665741e96..713528b72 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -22,6 +22,7 @@ import (
"errors"
"fmt"
"regexp"
+ "strconv"
"strings"
"testing"
@@ -428,7 +429,8 @@ pushbytess "1" "2" "1"
const v8Nonsense = v7Nonsense + switchNonsense + frameNonsense + matchNonsense + boxNonsense
-const v9Nonsense = v8Nonsense + pairingNonsense
+const v9Nonsense = v8Nonsense
+const v10Nonsense = v9Nonsense + pairingNonsense
const v6Compiled = "2004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f2310231123122313231418191a1b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b400b53a03b6b7043cb8033a0c2349c42a9631007300810881088120978101c53a8101c6003a"
@@ -444,30 +446,33 @@ const matchCompiled = "83030102018e02fff500008203013101320131"
const v8Compiled = v7Compiled + switchCompiled + frameCompiled + matchCompiled + boxCompiled
-const v9Compiled = v8Compiled + pairingCompiled
+const v9Compiled = v8Compiled
+const v10Compiled = v9Compiled + pairingCompiled
var nonsense = map[uint64]string{
- 1: v1Nonsense,
- 2: v2Nonsense,
- 3: v3Nonsense,
- 4: v4Nonsense,
- 5: v5Nonsense,
- 6: v6Nonsense,
- 7: v7Nonsense,
- 8: v8Nonsense,
- 9: v9Nonsense,
+ 1: v1Nonsense,
+ 2: v2Nonsense,
+ 3: v3Nonsense,
+ 4: v4Nonsense,
+ 5: v5Nonsense,
+ 6: v6Nonsense,
+ 7: v7Nonsense,
+ 8: v8Nonsense,
+ 9: v9Nonsense,
+ 10: v10Nonsense,
}
var compiled = map[uint64]string{
- 1: "012008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f2310231123122313231418191a1b1c2b1716154000032903494",
- 2: "022008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f2310231123122313231418191a1b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f",
- 3: "032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f2310231123122313231418191a1b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e",
- 4: "042004010200b7a60c26040242420c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f2310231123122313231418191a1b1c28171615400003290349483403350222231d4a484848482a50512a632223524100034200004322602261222b634848222862482864286548482228236628226724286828692422700048482471004848361c0037001a0031183119311b311d311e311f312024221e312131223123312431253126312731283129312a312b312c312d312e312f44782522531422542b2355220823564c4d4b0222382123391c0081e80780046a6f686e2281d00f24231f880003420001892223902291922394239593a0a1a2a3a4a5a6a7a8a9aaabacadae23af3a00003b003c003d8164",
- 5: "052004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f2310231123122313231418191a1b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b400b53a03",
- 6: "06" + v6Compiled,
- 7: "07" + v7Compiled,
- 8: "08" + v8Compiled,
- 9: "09" + v9Compiled,
+ 1: "012008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f2310231123122313231418191a1b1c2b1716154000032903494",
+ 2: "022008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f2310231123122313231418191a1b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f",
+ 3: "032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f2310231123122313231418191a1b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e",
+ 4: "042004010200b7a60c26040242420c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f2310231123122313231418191a1b1c28171615400003290349483403350222231d4a484848482a50512a632223524100034200004322602261222b634848222862482864286548482228236628226724286828692422700048482471004848361c0037001a0031183119311b311d311e311f312024221e312131223123312431253126312731283129312a312b312c312d312e312f44782522531422542b2355220823564c4d4b0222382123391c0081e80780046a6f686e2281d00f24231f880003420001892223902291922394239593a0a1a2a3a4a5a6a7a8a9aaabacadae23af3a00003b003c003d8164",
+ 5: "052004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f2310231123122313231418191a1b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b400b53a03",
+ 6: "06" + v6Compiled,
+ 7: "07" + v7Compiled,
+ 8: "08" + v8Compiled,
+ 9: "09" + v9Compiled,
+ 10: "0a" + v10Compiled,
}
func pseudoOp(opcode string) bool {
@@ -553,11 +558,24 @@ pop
require.Equal(t, ops1.Program, ops2.Program)
}
-type Expect struct {
+type expect struct {
l int
+ c int
s string
}
+func exp(l int, s string, extra ...int) expect {
+ e := expect{l: l, c: 0, s: s}
+ switch len(extra) {
+ case 0: /* nothing */
+ case 1:
+ e.c = extra[0]
+ default:
+ panic(extra)
+ }
+ return e
+}
+
func testMatch(t testing.TB, actual, expected string) (ok bool) {
defer func() {
t.Helper()
@@ -608,7 +626,7 @@ func summarize(trace *strings.Builder) string {
return msg + "(trace truncated)\n"
}
-func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpStream {
+func testProg(t testing.TB, source string, ver uint64, expected ...expect) *OpStream {
t.Helper()
ops, err := assembleWithTrace(source, ver)
if len(expected) == 0 {
@@ -658,7 +676,7 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
t.FailNow()
}
} else {
- var found *lineError
+ var found *sourceError
for i := range errors {
if errors[i].Line == exp.l {
found = &errors[i]
@@ -669,7 +687,11 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
t.Log(fmt.Sprintf("Errors: %v", errors))
}
require.NotNil(t, found, "Error %s was not found on line %d", exp.s, exp.l)
- msg := found.Unwrap().Error()
+ if exp.c != 0 {
+ require.Equal(t, exp.c, found.Column, "Error %s was not found at column %d. %s was.",
+ exp.s, exp.c, found.Error())
+ }
+ msg := found.Unwrap().Error() // unwrap avoids line,col prefix for easier matching
if !testMatch(t, msg, exp.s) {
t.Log(summarize(ops.Trace))
t.FailNow()
@@ -681,7 +703,7 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
return ops
}
-func testLine(t *testing.T, line string, ver uint64, expected string) {
+func testLine(t *testing.T, line string, ver uint64, expected string, col ...int) {
t.Helper()
// By embedding the source line between two other lines, the
// test for the correct line number in the error is more
@@ -691,35 +713,36 @@ func testLine(t *testing.T, line string, ver uint64, expected string) {
testProg(t, source, ver)
return
}
- testProg(t, source, ver, Expect{2, expected})
+ testProg(t, source, ver, exp(2, expected, col...))
}
func TestAssembleTxna(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testLine(t, "txna Accounts 256", AssemblerMaxVersion, "txna i beyond 255: 256")
- testLine(t, "txna ApplicationArgs 256", AssemblerMaxVersion, "txna i beyond 255: 256")
- testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna unknown field: \"Sender\"")
- testLine(t, "gtxna 0 Accounts 256", AssemblerMaxVersion, "gtxna i beyond 255: 256")
+ testLine(t, "txna Accounts 256", AssemblerMaxVersion, "txna i beyond 255: 256", 14)
+ testLine(t, "txna ApplicationArgs 256", AssemblerMaxVersion, "txna i beyond 255: 256", 21)
+ testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna unknown field: \"Sender\"", 5)
+ testLine(t, "gtxna 0 Accounts 256", AssemblerMaxVersion, "gtxna i beyond 255: 256", 17)
testLine(t, "gtxna 0 ApplicationArgs 256", AssemblerMaxVersion, "gtxna i beyond 255: 256")
- testLine(t, "gtxna 256 Accounts 0", AssemblerMaxVersion, "gtxna t beyond 255: 256")
- testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna unknown field: \"Sender\"")
+ testLine(t, "gtxna 256 Accounts 0", AssemblerMaxVersion, "gtxna t beyond 255: 256", 6)
+ testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna unknown field: \"Sender\"", 8)
testLine(t, "gtxna ApplicationArgs 0 255", AssemblerMaxVersion, "gtxna can only use \"ApplicationArgs\" as immediate 2")
testLine(t, "gtxna 0 255 ApplicationArgs", AssemblerMaxVersion, "gtxna can only use \"255\" as immediate 1 or 3")
testLine(t, "txn Accounts 256", AssemblerMaxVersion, "txn i beyond 255: 256")
testLine(t, "txn ApplicationArgs 256", AssemblerMaxVersion, "txn i beyond 255: 256")
- testLine(t, "txn 255 ApplicationArgs", AssemblerMaxVersion, "txn with 2 immediates can only use \"255\" as immediate 2")
+ testLine(t, "txn 255 ApplicationArgs", AssemblerMaxVersion, "txn with 2 immediates can only use \"255\" as immediate 2", 4)
testLine(t, "txn Sender 256", AssemblerMaxVersion, "\"Sender\" field of txn can only be used with 1 immediate")
testLine(t, "gtxn 0 Accounts 256", AssemblerMaxVersion, "gtxn i beyond 255: 256")
testLine(t, "gtxn 0 ApplicationArgs 256", AssemblerMaxVersion, "gtxn i beyond 255: 256")
testLine(t, "gtxn 256 Accounts 0", AssemblerMaxVersion, "gtxn t beyond 255: 256")
testLine(t, "gtxn 0 Sender 256", AssemblerMaxVersion, "\"Sender\" field of gtxn can only be used with 2 immediates")
- testLine(t, "gtxn ApplicationArgs 0 255", AssemblerMaxVersion, "gtxn with 3 immediates can only use \"ApplicationArgs\" as immediate 2")
+ testLine(t, "gtxn ApplicationArgs 0 255", AssemblerMaxVersion, "gtxn with 3 immediates can only use \"ApplicationArgs\" as immediate 2", 5)
testLine(t, "gtxn 0 255 ApplicationArgs", AssemblerMaxVersion, "gtxn with 3 immediates can only use \"255\" as immediate 1 or 3")
testLine(t, "txn Accounts 0", 1, "txn opcode with 2 immediates was introduced in v2")
+ testLine(t, "txn", 2, "txn expects 1 or 2 immediate arguments")
testLine(t, "txn Accounts 0 1", 2, "txn expects 1 or 2 immediate arguments")
testLine(t, "txna Accounts 0 1", AssemblerMaxVersion, "txna expects 2 immediate arguments")
testLine(t, "txn Accounts 0 1", AssemblerMaxVersion, "txn expects 1 or 2 immediate arguments")
@@ -753,14 +776,15 @@ func TestAssembleGlobal(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testLine(t, "global", AssemblerMaxVersion, "global expects 1 immediate argument")
+ testLine(t, "global", AssemblerMaxVersion, "global expects 1 immediate argument", 6)
+ testLine(t, "global aa bb", AssemblerMaxVersion, "global expects 1 immediate argument", 10)
testLine(t, "global a", AssemblerMaxVersion, "global unknown field: \"a\"")
testProg(t, "global MinTxnFee; int 2; +", AssemblerMaxVersion)
testProg(t, "global ZeroAddress; byte 0x12; concat; len", AssemblerMaxVersion)
testProg(t, "global MinTxnFee; byte 0x12; concat", AssemblerMaxVersion,
- Expect{1, "concat arg 0 wanted type []byte..."})
+ exp(1, "concat arg 0 wanted type []byte...", 29))
testProg(t, "int 2; global ZeroAddress; +", AssemblerMaxVersion,
- Expect{1, "+ arg 1 wanted type uint64..."})
+ exp(1, "+ arg 1 wanted type uint64...", 27))
}
func TestAssembleDefault(t *testing.T) {
@@ -769,10 +793,10 @@ func TestAssembleDefault(t *testing.T) {
source := `byte 0x1122334455
int 1
-+
+ +
// comment
`
- testProg(t, source, AssemblerMaxVersion, Expect{3, "+ arg 0 wanted type uint64 got []byte"})
+ testProg(t, source, AssemblerMaxVersion, exp(3, "+ arg 0 wanted type uint64 got [5]byte", 1))
}
// mutateProgVersion replaces version (first two symbols) in hex-encoded program
@@ -787,11 +811,11 @@ func TestOpUint(t *testing.T) {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := newOpStream(v)
- ops.IntLiteral(0xcafebabe)
+ ops.intLiteral(0xcafef00d)
prog := ops.prependCBlocks()
require.NotNil(t, prog)
s := hex.EncodeToString(prog)
- expected := mutateProgVersion(v, "012001bef5fad70c22")
+ expected := mutateProgVersion(v, "xx20018de0fbd70c22")
require.Equal(t, expected, s)
})
}
@@ -804,11 +828,11 @@ func TestOpUint64(t *testing.T) {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := newOpStream(v)
- ops.IntLiteral(0xcafebabecafebabe)
+ ops.intLiteral(0xcafef00dcafef00d)
prog := ops.prependCBlocks()
require.NotNil(t, prog)
s := hex.EncodeToString(prog)
- require.Equal(t, mutateProgVersion(v, "012001bef5fad7ecd7aeffca0122"), s)
+ require.Equal(t, mutateProgVersion(v, "xx20018de0fbd7dc81bcffca0122"), s)
})
}
}
@@ -820,12 +844,12 @@ func TestOpBytes(t *testing.T) {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := newOpStream(v)
- ops.ByteLiteral([]byte("abcdef"))
+ ops.byteLiteral([]byte("abcdef"))
prog := ops.prependCBlocks()
require.NotNil(t, prog)
s := hex.EncodeToString(prog)
require.Equal(t, mutateProgVersion(v, "0126010661626364656628"), s)
- testProg(t, "byte 0x7; len", v, Expect{1, "...odd length hex string"})
+ testProg(t, "byte 0x7; len", v, exp(1, "...odd length hex string", 5))
})
}
}
@@ -834,8 +858,8 @@ func TestAssembleInt(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- expectedDefaultConsts := "012001bef5fad70c22"
- expectedOptimizedConsts := "0181bef5fad70c"
+ expectedDefaultConsts := "xx20018de0fbd70c22"
+ expectedOptimizedConsts := "xx818de0fbd70c"
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
@@ -844,7 +868,7 @@ func TestAssembleInt(t *testing.T) {
expected = expectedOptimizedConsts
}
- text := "int 0xcafebabe"
+ text := "int 0xcafef00d"
ops := testProg(t, text, v)
s := hex.EncodeToString(ops.Program)
require.Equal(t, mutateProgVersion(v, expected), s)
@@ -888,9 +912,30 @@ func TestAssembleBytes(t *testing.T) {
expectedDefaultConsts := "0126010661626364656628"
expectedOptimizedConsts := "018006616263646566"
- bad := [][]string{
- {"byte", "...needs byte literal argument"},
- {`byte "john" "doe"`, "...with extraneous argument"},
+ bad := []struct {
+ source string
+ msg string
+ col int
+ }{
+ {"byte", "...needs byte literal argument", 4},
+ {`byte "john" "doe"`, "...with extraneous argument", 12},
+ {"byte base64", "... base64 needs...", 5}, // maybe we should report error at end of "base64"?
+ {"byte base32", "... base32 needs...", 5},
+ // these next messages could have the exact column of the problem, but
+ // would require too much refactoring for the value
+ {`byte "jo\qhn"`, "...invalid escape sequence...", 5},
+ {`byte base64(YWJjZGVm)extrajunk`, "...must end at first closing parenthesis", 5},
+ {`byte base64(YWJjZGVm)x`, "...must end at first closing parenthesis", 5},
+ {`byte base64(YWJjZGVm`, "...lacks closing parenthesis", 5},
+ {`byte base32(MFRGGZDFMY)extrajunk`, "...must end at first closing parenthesis", 5},
+ {`byte base32(MFRGGZDFMY)x`, "...must end at first closing parenthesis", 5},
+ {`byte base32(MFRGGZDFMY`, "...lacks closing parenthesis", 5},
+ {`byte b32 mfrggzdfmy`, "...illegal base32 data at input byte 0", 9},
+ {`byte b32 MFrggzdfmy`, "...illegal base32 data at input byte 2", 9},
+ {`byte b32(mfrggzdfmy)`, "...illegal base32 data at input byte 4", 5},
+ {`byte b32(MFrggzdfmy)`, "...illegal base32 data at input byte 6", 5},
+ {`byte base32(mfrggzdfmy)`, "...illegal base32 data at input byte 7", 5},
+ {`byte base32(MFrggzdfmy)`, "...illegal base32 data at input byte 9", 5},
}
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
@@ -911,10 +956,10 @@ func TestAssembleBytes(t *testing.T) {
}
for _, b := range bad {
- testProg(t, b[0], v, Expect{1, b[1]})
+ testProg(t, b.source, v, exp(1, b.msg, b.col))
// pushbytes should produce the same errors
if v >= 3 {
- testProg(t, strings.Replace(b[0], "byte", "pushbytes", 1), v, Expect{1, b[1]})
+ testProg(t, strings.Replace(b.source, "byte", "pushbytes", 1), v, exp(1, b.msg, b.col+len("pushs")))
}
}
})
@@ -965,13 +1010,13 @@ func TestManualCBlocks(t *testing.T) {
"bytecblock 0x44 0x55 0x4455; bytec_0; byte 0x55; concat; bytec_2; ==")
// But complain if they do not
- testProg(t, "intcblock 4; int 3;", 3, Expect{1, "int 3 used without 3 in intcblock"})
- testProg(t, "bytecblock 0x44; byte 0x33;", 3, Expect{1, "byte/addr/method used without value in bytecblock"})
+ testProg(t, "intcblock 4; int 3;", 3, exp(1, "value 3 does not appear...", 17))
+ testProg(t, "bytecblock 0x44; byte 0x33;", 3, exp(1, "value 0x33 does not appear...", 22))
// Or if the ref comes before the constant block, even if they match
- testProg(t, "int 5; intcblock 4;", 3, Expect{1, "intcblock following int"})
- testProg(t, "int 4; intcblock 4;", 3, Expect{1, "intcblock following int"})
- testProg(t, "addr RWXCBB73XJITATVQFOI7MVUUQOL2PFDDSDUMW4H4T2SNSX4SEUOQ2MM7F4; bytecblock 0x44", 3, Expect{1, "bytecblock following byte/addr/method"})
+ testProg(t, "int 5; intcblock 4;", 3, exp(1, "intcblock following int", 7))
+ testProg(t, "int 4; intcblock 4;", 3, exp(1, "intcblock following int", 7))
+ testProg(t, "addr RWXCBB73XJITATVQFOI7MVUUQOL2PFDDSDUMW4H4T2SNSX4SEUOQ2MM7F4; bytecblock 0x44", 3, exp(1, "bytecblock following byte/addr/method"))
// But we can't complain precisely once backjumps are allowed, so we force
// compile to push*. (We don't analyze the CFG, so we don't know if we can
@@ -984,9 +1029,9 @@ func TestManualCBlocks(t *testing.T) {
"bytecblock 0x44 0x55 0x4455; byte 0x44; byte 0x55; concat; byte 0x4455; ==",
"bytecblock 0x44 0x55 0x4455; pushbytes 0x44; pushbytes 0x55; concat; pushbytes 0x4455; ==")
// Can't switch to push* after the fact.
- testProg(t, "int 5; intcblock 4;", 4, Expect{1, "intcblock following int"})
- testProg(t, "int 4; intcblock 4;", 4, Expect{1, "intcblock following int"})
- testProg(t, "addr RWXCBB73XJITATVQFOI7MVUUQOL2PFDDSDUMW4H4T2SNSX4SEUOQ2MM7F4; bytecblock 0x44", 4, Expect{1, "bytecblock following byte/addr/method"})
+ testProg(t, "int 5; intcblock 4;", 4, exp(1, "intcblock following int", 7))
+ testProg(t, "int 4; intcblock 4;", 4, exp(1, "intcblock following int", 7))
+ testProg(t, "addr RWXCBB73XJITATVQFOI7MVUUQOL2PFDDSDUMW4H4T2SNSX4SEUOQ2MM7F4; bytecblock 0x44", 4, exp(1, "bytecblock following byte/addr/method", 65))
// Ignore manually added cblocks in deadcode, so they can be added easily to
// existing programs. There are proposals to put metadata there.
@@ -1017,12 +1062,12 @@ func TestManualCBlocksPreBackBranch(t *testing.T) {
testProg(t, "intcblock 10 20; int 10;", backBranchEnabledVersion-1)
// By the same token, assembly complains if that intcblock doesn't have the
// constant. In v3, and v3 only, it *could* pushint.
- testProg(t, "intcblock 10 20; int 30;", backBranchEnabledVersion-1, Expect{1, "int 30 used..."})
+ testProg(t, "intcblock 10 20; int 30;", backBranchEnabledVersion-1, exp(1, "value 30 does not appear..."))
// Since the second intcblock is dead, the `int 10` "sees" the first block, not the second
testProg(t, "intcblock 10 20; b skip; intcblock 3 4 5; skip: int 10;", backBranchEnabledVersion-1)
testProg(t, "intcblock 10 20; b skip; intcblock 3 4 5; skip: int 3;", backBranchEnabledVersion-1,
- Expect{1, "int 3 used..."})
+ exp(1, "value 3 does not appear...", 52))
// Here, the intcblock in effect is unknowable, better to force the user to
// use intc (unless pushint is available to save the day).
@@ -1033,23 +1078,23 @@ func TestManualCBlocksPreBackBranch(t *testing.T) {
// backBranchEnabledVersion-2 does not
testProg(t, "intcblock 10 20; txn NumAppArgs; bz skip; intcblock 3 4 5; skip: int 10;", backBranchEnabledVersion-2,
- Expect{1, "int 10 used with manual intcblocks. Use intc."})
+ exp(1, "int 10 used with manual intcblocks. Use intc.", 65))
testProg(t, "intcblock 10 20; txn NumAppArgs; bz skip; intcblock 3 4 5; skip: int 3;", backBranchEnabledVersion-2,
- Expect{1, "int 3 used with manual intcblocks. Use intc."})
+ exp(1, "int 3 used with manual intcblocks. Use intc.", 65))
// REPEAT ABOVE, BUT FOR BYTE BLOCKS
testProg(t, "bytecblock 0x10 0x20; byte 0x10;", backBranchEnabledVersion-1)
- testProg(t, "bytecblock 0x10 0x20; byte 0x30;", backBranchEnabledVersion-1, Expect{1, "byte/addr/method used..."})
+ testProg(t, "bytecblock 0x10 0x20; byte 0x30;", backBranchEnabledVersion-1, exp(1, "value 0x30 does not appear...", 27))
testProg(t, "bytecblock 0x10 0x20; b skip; bytecblock 0x03 0x04 0x05; skip: byte 0x10;", backBranchEnabledVersion-1)
testProg(t, "bytecblock 0x10 0x20; b skip; bytecblock 0x03 0x04 0x05; skip: byte 0x03;", backBranchEnabledVersion-1,
- Expect{1, "byte/addr/method used..."})
+ exp(1, "value 0x03 does not appear...", 68))
testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x10;", backBranchEnabledVersion-1)
testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x03;", backBranchEnabledVersion-1)
testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x10;", backBranchEnabledVersion-2,
- Expect{1, "byte 0x10 used with manual bytecblocks. Use bytec."})
+ exp(1, "byte 0x10 used with manual bytecblocks. Use bytec."))
testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x03;", backBranchEnabledVersion-2,
- Expect{1, "byte 0x03 used with manual bytecblocks. Use bytec."})
+ exp(1, "byte 0x03 used with manual bytecblocks. Use bytec."))
}
func TestAssembleOptimizedConstants(t *testing.T) {
@@ -1283,13 +1328,25 @@ int ClearState
}
}
-func TestFieldsFromLine(t *testing.T) {
+func stringsOf(tokens []token) []string {
+ if tokens == nil {
+ return nil
+ }
+ out := make([]string, len(tokens))
+ for i, t := range tokens {
+ out[i] = t.str
+ }
+ return out
+}
+
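// The tokenizer now returns position-aware tokens instead of plain strings. A
// minimal shape consistent with stringsOf above and with the columns asserted
// via exp(..., col) — hypothetical; the real definition lives in the assembler:
type token struct {
	str  string // token text, as extracted by stringsOf
	line int    // 1-based source line (tokensFromLine's second argument)
	col  int    // 0-based column within the line
}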
+func TestTokensFromLine(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
check := func(line string, tokens ...string) {
t.Helper()
- assert.Equal(t, tokensFromLine(line), tokens)
+ fromLine := stringsOf(tokensFromLine(line, 0))
+ assert.Equal(t, fromLine, tokens)
}
check("op arg", "op", "arg")
@@ -1343,38 +1400,29 @@ func TestNextStatement(t *testing.T) {
// this test ensures nextStatement splits tokens on semicolons properly
// macro testing should be handled in TestMacros
ops := newOpStream(AssemblerMaxVersion)
- check := func(tokens []string, left []string, right []string) {
+ check := func(line string, left []string, right []string) {
t.Helper()
+ tokens := tokensFromLine(line, 0)
current, next := nextStatement(&ops, tokens)
- assert.Equal(t, left, current)
- assert.Equal(t, right, next)
+ assert.Equal(t, left, stringsOf(current))
+ assert.Equal(t, right, stringsOf(next))
}
- check([]string{"hey,", "how's", ";", ";", "it", "going", ";"},
+ check(`hey, how's ; ; it going ;`,
[]string{"hey,", "how's"},
[]string{";", "it", "going", ";"},
)
- check([]string{";"},
- []string{},
- []string{},
- )
+ check(";", []string{}, []string{})
- check([]string{";", "it", "going"},
- []string{},
- []string{"it", "going"},
- )
+ check(`; it going`, []string{}, []string{"it", "going"})
- check([]string{"hey,", "how's"},
- []string{"hey,", "how's"},
- nil,
- )
+ check(`hey, how's`, []string{"hey,", "how's"}, nil)
- check([]string{`"hey in quotes;"`, "getting", `";"`, ";", "tricky"},
+ check(`"hey in quotes;" getting ";" ; tricky`,
[]string{`"hey in quotes;"`, "getting", `";"`},
[]string{"tricky"},
)
-
}
func TestAssembleRejectNegJump(t *testing.T) {
@@ -1387,7 +1435,7 @@ bnz wat
int 2`
for v := uint64(1); v < backBranchEnabledVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- testProg(t, source, v, Expect{3, "label \"wat\" is a back reference..."})
+ testProg(t, source, v, exp(3, "label \"wat\" is a back reference...", 4))
})
}
for v := uint64(backBranchEnabledVersion); v <= AssemblerMaxVersion; v++ {
@@ -1441,7 +1489,23 @@ bnz nowhere
int 2`
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- testProg(t, source, v, Expect{2, "reference to undefined label \"nowhere\""})
+ testProg(t, source, v, exp(2, "reference to undefined label \"nowhere\"", 4))
+ })
+ }
+}
+
+func TestAssembleRejectDupLabel(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := `
+ XXX: int 1; pop;
+ XXX: int 1; pop; // different indent to prove the returned column is from the right label
+ int 1
+`
+ for v := uint64(1); v <= AssemblerMaxVersion; v++ {
+ t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
+ testProg(t, source, v, exp(3, "duplicate label \"XXX\"", 2))
})
}
}
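// The new test relies on duplicate-label errors carrying the position of the
// second definition. A sketch of that bookkeeping (hypothetical types; not
// the assembler's actual code):
type srcPos struct{ line, col int }

func defineLabel(labels map[string]srcPos, name string, at srcPos) error {
	if _, ok := labels[name]; ok {
		// Attribute the error to the new, duplicate definition; that is why
		// the test above expects line 3, column 2 (the re-indented XXX).
		return fmt.Errorf("duplicate label %q", name)
	}
	labels[name] = at
	return nil
}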
@@ -1474,8 +1538,8 @@ int 2`
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
testProg(t, source, v,
- Expect{2, "reference to undefined label \"nowhere\""},
- Expect{4, "txn unknown field: \"XYZ\""})
+ exp(2, "reference to undefined label \"nowhere\"", 4),
+ exp(4, "txn unknown field: \"XYZ\"", 4))
})
}
}
@@ -1617,15 +1681,11 @@ func TestAssembleDisassembleCycle(t *testing.T) {
t.Parallel()
// Test that disassembly re-assembles to the same program bytes.
- // Disassembly won't necessarily perfectly recreate the source text, but assembling the result of Disassemble() should be the same program bytes.
- // This confirms that each program compiles to the same bytes
- // (except the leading version indicator), when compiled under
- // original version, unspecified version (so it should pick up
- // the pragma) and current version with pragma removed. That
- // doesn't *have* to be true, as we can introduce
- // optimizations in later versions that change the bytecode
- // emitted. But currently it is, so we test it for now to
- // catch any suprises.
+ // Disassembly won't necessarily perfectly recreate the source text, but
+ // assembling the result of Disassemble() should be the same program bytes.
+ // To confirm that, the disassembly output must be re-assembled. Since it
+ // has a #pragma version, we re-assemble with assemblerNoVersion to let the
+ // assembler pick it up.
require.LessOrEqual(t, LogicVersion, len(nonsense)) // Allow nonsense for future versions
for v, source := range nonsense {
v, source := v, source
@@ -1637,11 +1697,9 @@ func TestAssembleDisassembleCycle(t *testing.T) {
ops := testProg(t, source, v)

t2, err := Disassemble(ops.Program)
require.NoError(t, err)
- none := testProg(t, notrack(t2), assemblerNoVersion)
- require.Equal(t, ops.Program[1:], none.Program[1:])
- t3 := "// " + t2 // This comments out the #pragma version
- current := testProg(t, notrack(t3), AssemblerMaxVersion)
- require.Equal(t, ops.Program[1:], current.Program[1:])
+			// nonsense uses #pragma notrack to avoid type tracking, since that information is lost in disassembly
+ reassembled := testProg(t, notrack(t2), assemblerNoVersion)
+ require.Equal(t, ops.Program, reassembled.Program)
})
}
}
@@ -1682,20 +1740,48 @@ func TestConstantArgs(t *testing.T) {
t.Parallel()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
- testProg(t, "int", v, Expect{1, "int needs one immediate argument, was given 0"})
- testProg(t, "int 1 2", v, Expect{1, "int needs one immediate argument, was given 2"})
- testProg(t, "intc", v, Expect{1, "intc needs one immediate argument, was given 0"})
- testProg(t, "intc hi bye", v, Expect{1, "intc needs one immediate argument, was given 2"})
- testProg(t, "byte", v, Expect{1, "byte needs byte literal argument"})
- testProg(t, "bytec", v, Expect{1, "bytec needs one immediate argument, was given 0"})
- testProg(t, "bytec 1 x", v, Expect{1, "bytec needs one immediate argument, was given 2"})
- testProg(t, "addr", v, Expect{1, "addr needs one immediate argument, was given 0"})
- testProg(t, "addr x y", v, Expect{1, "addr needs one immediate argument, was given 2"})
+ testProg(t, "int", v, exp(1, "int expects 1 immediate argument", 3))
+ testProg(t, "int pay", v)
+ testProg(t, "int pya", v, exp(1, "unable to parse \"pya\" as integer", 4))
+ testProg(t, "int 1 2", v, exp(1, "int expects 1 immediate argument", 6))
+ testProg(t, "intc", v, exp(1, "intc expects 1 immediate argument", 4))
+ testProg(t, "intc pay", v, exp(1, "unable to parse constant \"pay\" as integer", 5)) // don't accept "pay" constant
+ testProg(t, "intc hi bye", v, exp(1, "intc expects 1 immediate argument", 8))
+ testProg(t, "byte", v, exp(1, "byte needs byte literal argument", 4))
+ testProg(t, "byte b32", v, exp(1, "byte b32 needs byte literal argument"))
+ testProg(t, "byte 0xaa 0xbb", v, exp(1, "byte with extraneous argument", 10))
+ testProg(t, "byte b32 MFRGGZDFMY MFRGGZDFMY", v, exp(1, "byte with extraneous argument", 20))
+ testProg(t, "bytec", v, exp(1, "bytec expects 1 immediate argument"))
+ testProg(t, "bytec 1 x", v, exp(1, "bytec expects 1 immediate argument"))
+ testProg(t, "bytec pay", v, exp(1, "unable to parse constant \"pay\" as integer", 6)) // don't accept "pay" constant
+ testProg(t, "addr", v, exp(1, "addr expects 1 immediate argument", 4))
+ testProg(t, "addr x y", v, exp(1, "addr expects 1 immediate argument", 9))
+ testProg(t, "addr x", v, exp(1, "failed to decode address x ...", 5))
+ testProg(t, "method", v, exp(1, "method expects 1 immediate argument", 6))
+ testProg(t, "method xx yy", v, exp(1, "method expects 1 immediate argument", 10))
}
for v := uint64(3); v <= AssemblerMaxVersion; v++ {
- testProg(t, "pushint", v, Expect{1, "pushint needs one immediate argument, was given 0"})
- testProg(t, "pushint 3 4", v, Expect{1, "pushint needs one immediate argument, was given 2"})
- testProg(t, "pushbytes", v, Expect{1, "pushbytes needs byte literal argument"})
+ testProg(t, "pushint", v, exp(1, "pushint expects 1 immediate argument"))
+ testProg(t, "pushint 3 4", v, exp(1, "pushint expects 1 immediate argument"))
+ testProg(t, "pushint x", v, exp(1, "unable to parse \"x\" as integer", 8))
+ testProg(t, "pushbytes", v, exp(1, "pushbytes needs byte literal argument"))
+ testProg(t, "pushbytes b32", v, exp(1, "pushbytes b32 needs byte literal argument"))
+ testProg(t, "pushbytes b32(MFRGGZDFMY", v, exp(1, "pushbytes argument b32(MFRGGZDFMY lacks closing parenthesis"))
+ testProg(t, "pushbytes b32(MFRGGZDFMY)X", v, exp(1, "pushbytes argument b32(MFRGGZDFMY)X must end at first closing parenthesis"))
+ }
+
+ for v := uint64(8); v <= AssemblerMaxVersion; v++ {
+ testProg(t, "pushints", v)
+ testProg(t, "pushints 200", v)
+ testProg(t, "pushints 3 4", v)
+
+ testProg(t, "pushbytess", v)
+ testProg(t, "pushbytess 0xff", v)
+ testProg(t, "pushbytess 0xaa 0xbb", v)
+
+ testProg(t, "pushbytess b32(MFRGGZDFMY) b32(MFRGGZDFMY)", v)
+ testProg(t, "pushbytess b32 MFRGGZDFMY b32 MFRGGZDFMY", v)
+ testProg(t, "pushbytess b32(MFRGGZDFMY b32(MFRGGZDFMY)", v, exp(1, "pushbytess argument b32(MFRGGZDFMY lacks closing parenthesis"))
}
}
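// The pushbytes/pushbytess cases above exercise two distinct failure modes for
// b32(...) literals. A sketch of the check implied by those messages
// (hypothetical helper; the real parsing lives in the assembler):
func checkParens(arg string) error {
	if strings.HasPrefix(arg, "b32(") {
		close := strings.IndexByte(arg, ')')
		if close < 0 {
			return fmt.Errorf("argument %s lacks closing parenthesis", arg)
		}
		if close != len(arg)-1 {
			return fmt.Errorf("argument %s must end at first closing parenthesis", arg)
		}
	}
	return nil
}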
@@ -1704,17 +1790,17 @@ func TestBranchArgs(t *testing.T) {
t.Parallel()
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
- testProg(t, "b", v, Expect{1, "b needs a single label argument"})
- testProg(t, "b lab1 lab2", v, Expect{1, "b needs a single label argument"})
- testProg(t, "int 1; bz", v, Expect{1, "bz needs a single label argument"})
- testProg(t, "int 1; bz a b", v, Expect{1, "bz needs a single label argument"})
- testProg(t, "int 1; bnz", v, Expect{1, "bnz needs a single label argument"})
- testProg(t, "int 1; bnz c d", v, Expect{1, "bnz needs a single label argument"})
+ testProg(t, "b", v, exp(1, "b expects 1 immediate argument"))
+ testProg(t, "b lab1 lab2", v, exp(1, "b expects 1 immediate argument"))
+ testProg(t, "int 1; bz", v, exp(1, "bz expects 1 immediate argument"))
+ testProg(t, "int 1; bz a b", v, exp(1, "bz expects 1 immediate argument"))
+ testProg(t, "int 1; bnz", v, exp(1, "bnz expects 1 immediate argument"))
+ testProg(t, "int 1; bnz c d", v, exp(1, "bnz expects 1 immediate argument"))
}
for v := uint64(4); v <= AssemblerMaxVersion; v++ {
- testProg(t, "callsub", v, Expect{1, "callsub needs a single label argument"})
- testProg(t, "callsub one two", v, Expect{1, "callsub needs a single label argument"})
+ testProg(t, "callsub", v, exp(1, "callsub expects 1 immediate argument"))
+ testProg(t, "callsub one two", v, exp(1, "callsub expects 1 immediate argument"))
}
}
@@ -1722,117 +1808,101 @@ func TestAssembleDisassembleErrors(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- source := `txn Sender`
- ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- ops.Program[2] = 0x50 // txn field
- _, err = Disassemble(ops.Program)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid immediate f for txn")
-
- source = `txna Accounts 0`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- ops.Program[2] = 0x50 // txn field
- _, err = Disassemble(ops.Program)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid immediate f for txna")
-
- source = `gtxn 0 Sender`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- ops.Program[3] = 0x50 // txn field
- _, err = Disassemble(ops.Program)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid immediate f for gtxn")
-
- source = `gtxna 0 Accounts 0`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- ops.Program[3] = 0x50 // txn field
- _, err = Disassemble(ops.Program)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid immediate f for gtxna")
-
- source = `global MinTxnFee`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- ops.Program[2] = 0x50 // txn field
- _, err = Disassemble(ops.Program)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid immediate f for global")
-
- ops.Program[0] = 0x11 // version
- out, err := Disassemble(ops.Program)
- require.NoError(t, err)
- require.Contains(t, out, "unsupported version")
-
- ops.Program[0] = 0x01 // version
- ops.Program[1] = 0xFF // first opcode
- _, err = Disassemble(ops.Program)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid opcode")
-
- source = "int 0\nint 0\nasset_holding_get AssetFrozen"
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- ops.Program[7] = 0x50 // holding field
- _, err = Disassemble(ops.Program)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid immediate f for asset_holding_get")
-
- source = "int 0\nasset_params_get AssetTotal"
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- ops.Program[4] = 0x50 // params field
- _, err = Disassemble(ops.Program)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid immediate f for asset_params_get")
-
- source = "int 0\nasset_params_get AssetTotal"
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- _, err = Disassemble(ops.Program)
- require.NoError(t, err)
- ops.Program = ops.Program[0 : len(ops.Program)-1]
- _, err = Disassemble(ops.Program)
- require.Error(t, err)
- require.Contains(t, err.Error(), "program end while reading immediate f for asset_params_get")
-
- source = "gtxna 0 Accounts 0"
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- _, err = Disassemble(ops.Program)
- require.NoError(t, err)
- _, err = Disassemble(ops.Program[0 : len(ops.Program)-1])
- require.Error(t, err)
- require.Contains(t, err.Error(), "program end while reading immediate i for gtxna")
- _, err = Disassemble(ops.Program[0 : len(ops.Program)-2])
- require.Error(t, err)
- require.Contains(t, err.Error(), "program end while reading immediate f for gtxna")
- _, err = Disassemble(ops.Program[0 : len(ops.Program)-3])
- require.Error(t, err)
- require.Contains(t, err.Error(), "program end while reading immediate t for gtxna")
-
- source = "txna Accounts 0"
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- _, err = Disassemble(ops.Program)
- require.NoError(t, err)
- ops.Program = ops.Program[0 : len(ops.Program)-1]
- _, err = Disassemble(ops.Program)
- require.Error(t, err)
- require.Contains(t, err.Error(), "program end while reading immediate i for txna")
-
- source = "byte 0x4141\nsubstring 0 1"
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
- _, err = Disassemble(ops.Program)
- require.NoError(t, err)
- ops.Program = ops.Program[0 : len(ops.Program)-1]
- _, err = Disassemble(ops.Program)
- require.Error(t, err)
- require.Contains(t, err.Error(), "program end while reading immediate e for substring")
+ for v := uint64(2); v <= AssemblerMaxVersion; v++ {
+ t.Run(fmt.Sprintf("v%d", v), func(t *testing.T) {
+ source := `txn Sender`
+ ops := testProg(t, source, v)
+ ops.Program[len(ops.Program)-1] = 0x50 // txn field
+ dis, err := Disassemble(ops.Program)
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "invalid immediate f for txn")
+
+ source = `txna Accounts 0`
+ ops = testProg(t, source, v)
+ ops.Program[len(ops.Program)-2] = 0x50 // txn field
+ dis, err = Disassemble(ops.Program)
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "invalid immediate f for txna")
+
+ source = `gtxn 0 Sender`
+ ops = testProg(t, source, v)
+ ops.Program[len(ops.Program)-1] = 0x50 // txn field
+ dis, err = Disassemble(ops.Program)
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "invalid immediate f for gtxn")
+
+ source = `gtxna 0 Accounts 0`
+ ops = testProg(t, source, v)
+ ops.Program[len(ops.Program)-2] = 0x50 // txn field
+ dis, err = Disassemble(ops.Program)
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "invalid immediate f for gtxna")
+
+ source = `global MinTxnFee`
+ ops = testProg(t, source, v)
+ ops.Program[len(ops.Program)-1] = 0x50 // txn field
+ _, err = Disassemble(ops.Program)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "invalid immediate f for global")
+
+ ops.Program[0] = 0x11 // version
+ out, err := Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Contains(t, out, "unsupported version")
+
+ ops.Program[0] = 0x01 // version
+ ops.Program[1] = 0xFF // first opcode
+ dis, err = Disassemble(ops.Program)
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "invalid opcode")
+
+ source = "int 0; int 0\nasset_holding_get AssetFrozen"
+ ops = testProg(t, source, v)
+ ops.Program[len(ops.Program)-1] = 0x50 // holding field
+ dis, err = Disassemble(ops.Program)
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "invalid immediate f for")
+
+ source = "int 0\nasset_params_get AssetTotal"
+ ops = testProg(t, source, v)
+ ops.Program[len(ops.Program)-1] = 0x50 // params field
+ dis, err = Disassemble(ops.Program)
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "invalid immediate f for")
+
+ source = "int 0\nasset_params_get AssetTotal"
+ ops = testProg(t, source, v)
+ ops.Program = ops.Program[0 : len(ops.Program)-1]
+ dis, err = Disassemble(ops.Program)
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "program end while reading immediate f for")
+
+ source = "gtxna 0 Accounts 0"
+ ops = testProg(t, source, v)
+ dis, err = Disassemble(ops.Program[0 : len(ops.Program)-1])
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "program end while reading immediate i for gtxna")
+ dis, err = Disassemble(ops.Program[0 : len(ops.Program)-2])
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "program end while reading immediate f for gtxna")
+ dis, err = Disassemble(ops.Program[0 : len(ops.Program)-3])
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "program end while reading immediate t for gtxna")
+
+ source = "txna Accounts 0"
+ ops = testProg(t, source, v)
+ ops.Program = ops.Program[0 : len(ops.Program)-1]
+ dis, err = Disassemble(ops.Program)
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "program end while reading immediate i for txna")
+
+ source = "byte 0x4141\nsubstring 0 1"
+ ops = testProg(t, source, v)
+ dis, err = Disassemble(ops.Program[0 : len(ops.Program)-1])
+ require.Error(t, err, dis)
+ require.Contains(t, err.Error(), "program end while reading immediate e for substring")
+ })
+ }
}
func TestAssembleVersions(t *testing.T) {
@@ -1853,7 +1923,7 @@ balance
int 1
==`
for v := uint64(2); v < directRefEnabledVersion; v++ {
- testProg(t, source, v, Expect{2, "balance arg 0 wanted type uint64 got []byte"})
+ testProg(t, source, v, exp(2, "balance arg 0 wanted type uint64 got [1]byte"))
}
for v := uint64(directRefEnabledVersion); v <= AssemblerMaxVersion; v++ {
testProg(t, source, v)
@@ -1869,7 +1939,7 @@ min_balance
int 1
==`
for v := uint64(3); v < directRefEnabledVersion; v++ {
- testProg(t, source, v, Expect{2, "min_balance arg 0 wanted type uint64 got []byte"})
+ testProg(t, source, v, exp(2, "min_balance arg 0 wanted type uint64 got [1]byte"))
}
for v := uint64(directRefEnabledVersion); v <= AssemblerMaxVersion; v++ {
testProg(t, source, v)
@@ -1881,28 +1951,31 @@ func TestAssembleAsset(t *testing.T) {
t.Parallel()
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
- testProg(t, "asset_holding_get ABC 1", v,
- Expect{1, "asset_holding_get ABC 1 expects 2 stack arguments..."})
- testProg(t, "int 1; asset_holding_get ABC 1", v,
- Expect{1, "asset_holding_get ABC 1 expects 2 stack arguments..."})
- testProg(t, "int 1; int 1; asset_holding_get ABC 1", v,
- Expect{1, "asset_holding_get expects 1 immediate argument"})
- testProg(t, "int 1; int 1; asset_holding_get ABC", v,
- Expect{1, "asset_holding_get unknown field: \"ABC\""})
-
- testProg(t, "byte 0x1234; asset_params_get ABC 1", v,
- Expect{1, "asset_params_get ABC 1 arg 0 wanted type uint64..."})
-
- // Test that AssetUnitName is known to return bytes
- testProg(t, "int 1; asset_params_get AssetUnitName; pop; int 1; +", v,
- Expect{1, "+ arg 0 wanted type uint64..."})
-
- // Test that AssetTotal is known to return uint64
- testProg(t, "int 1; asset_params_get AssetTotal; pop; byte 0x12; concat", v,
- Expect{1, "concat arg 0 wanted type []byte..."})
-
- testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects 1 immediate argument")
- testLine(t, "asset_params_get ABC", v, "asset_params_get unknown field: \"ABC\"")
+ t.Run(strconv.Itoa(int(v)), func(t *testing.T) {
+ testProg(t, "asset_holding_get ABC 1", v,
+ exp(1, "asset_holding_get ABC 1 expects 2 stack arguments..."))
+ testProg(t, "int 1; asset_holding_get ABC 1", v,
+ exp(1, "asset_holding_get ABC 1 expects 2 stack arguments..."))
+
+ testProg(t, "int 1; int 1; asset_holding_get ABC 1", v,
+ exp(1, "asset_holding_get expects 1 immediate argument"))
+ testProg(t, "int 1; int 1; asset_holding_get ABC", v,
+ exp(1, "asset_holding_get unknown field: \"ABC\""))
+
+ testProg(t, "byte 0x1234; asset_params_get ABC 1", v,
+ exp(1, "asset_params_get ABC 1 arg 0 wanted type uint64..."))
+
+ // Test that AssetUnitName is known to return bytes
+ testProg(t, "int 1; asset_params_get AssetUnitName; pop; int 1; +", v,
+ exp(1, "+ arg 0 wanted type uint64..."))
+
+ // Test that AssetTotal is known to return uint64
+ testProg(t, "int 1; asset_params_get AssetTotal; pop; byte 0x12; concat", v,
+ exp(1, "concat arg 0 wanted type []byte..."))
+
+ testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects 1 immediate argument")
+ testLine(t, "asset_params_get ABC", v, "asset_params_get unknown field: \"ABC\"")
+ })
}
}
@@ -2153,21 +2226,27 @@ func TestHasStatefulOps(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- source := "int 1"
- ops := testProg(t, source, AssemblerMaxVersion)
- has, err := HasStatefulOps(ops.Program)
- require.NoError(t, err)
- require.False(t, has)
+ for v := uint64(2); v <= AssemblerMaxVersion; v++ {
+ t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
+ source := "int 1"
+ ops := testProg(t, source, v)
+ has, err := HasStatefulOps(ops.Program)
+ require.NoError(t, err)
+ require.False(t, has)
- source = `int 1
-int 1
-app_opted_in
-err
-`
- ops = testProg(t, source, AssemblerMaxVersion)
- has, err = HasStatefulOps(ops.Program)
- require.NoError(t, err)
- require.True(t, has)
+ source = `int 0; int 1; app_opted_in; err`
+ ops = testProg(t, source, v)
+ has, err = HasStatefulOps(ops.Program)
+ require.NoError(t, err)
+ require.True(t, has)
+
+ source = `int 1; asset_params_get AssetURL; err`
+ ops = testProg(t, source, v)
+ has, err = HasStatefulOps(ops.Program)
+ require.NoError(t, err)
+ require.True(t, has)
+ })
+ }
}
func TestStringLiteralParsing(t *testing.T) {
@@ -2258,7 +2337,7 @@ func TestStringLiteralParsing(t *testing.T) {
s = `"test\a"`
result, err = parseStringLiteral(s)
- require.EqualError(t, err, "invalid escape seq \\a")
+ require.EqualError(t, err, "invalid escape sequence \\a")
require.Nil(t, result)
s = `"test\x10\x1"`
@@ -2278,15 +2357,18 @@ func TestPragmas(t *testing.T) {
}
testProg(t, `#pragma version 100`, assemblerNoVersion,
- Expect{1, "unsupported version: 100"})
+ exp(1, "unsupported version: 100"))
- testProg(t, `int 1`, 99, Expect{0, "Can not assemble version 99"})
+ testProg(t, `int 1`, 99, exp(0, "Can not assemble version 99"))
// Allow this on the off chance someone needs to reassemble an old logicsig
testProg(t, `#pragma version 0`, assemblerNoVersion)
testProg(t, `#pragma version a`, assemblerNoVersion,
- Expect{1, `bad #pragma version: "a"`})
+ exp(1, `bad #pragma version: "a"`))
+
+ testProg(t, `#pramga version 3`, assemblerNoVersion,
+ exp(1, "unknown directive: pramga"))
// will default to 1
ops := testProg(t, "int 3", assemblerNoVersion)
@@ -2300,24 +2382,24 @@ func TestPragmas(t *testing.T) {
require.Equal(t, uint64(2), ops.Version)
// changing version is not allowed
- testProg(t, "#pragma version 1", 2, Expect{1, "version mismatch..."})
- testProg(t, "#pragma version 2", 1, Expect{1, "version mismatch..."})
+ testProg(t, "#pragma version 1", 2, exp(1, "version mismatch..."))
+ testProg(t, "#pragma version 2", 1, exp(1, "version mismatch..."))
testProg(t, "#pragma version 2\n#pragma version 1", assemblerNoVersion,
- Expect{2, "version mismatch..."})
+ exp(2, "version mismatch..."))
// repetitive, but fine
ops = testProg(t, "#pragma version 2\n#pragma version 2", assemblerNoVersion)
require.Equal(t, uint64(2), ops.Version)
testProg(t, "\nint 1\n#pragma version 2", assemblerNoVersion,
- Expect{3, "#pragma version is only allowed before instructions"})
+ exp(3, "#pragma version is only allowed before instructions"))
testProg(t, "#pragma run-mode 2", assemblerNoVersion,
- Expect{1, `unsupported pragma directive: "run-mode"`})
+ exp(1, `unsupported pragma directive: "run-mode"`))
testProg(t, "#pragma versions", assemblerNoVersion,
- Expect{1, `unsupported pragma directive: "versions"`})
+ exp(1, `unsupported pragma directive: "versions"`))
ops = testProg(t, "#pragma version 1", assemblerNoVersion)
require.Equal(t, uint64(1), ops.Version)
@@ -2325,13 +2407,33 @@ func TestPragmas(t *testing.T) {
ops = testProg(t, "\n#pragma version 1", assemblerNoVersion)
require.Equal(t, uint64(1), ops.Version)
- testProg(t, "#pragma", assemblerNoVersion, Expect{1, "empty pragma"})
+ testProg(t, "#pragma", assemblerNoVersion, exp(1, "empty pragma"))
testProg(t, "#pragma version", assemblerNoVersion,
- Expect{1, "no version value"})
+ exp(1, "no version value"))
ops = testProg(t, " #pragma version 5 ", assemblerNoVersion)
require.Equal(t, uint64(5), ops.Version)
+
+ testProg(t, "#pragma version 5 blah", assemblerNoVersion,
+ exp(1, "unexpected extra tokens: blah"))
+
+ testProg(t, "#pragma typetrack", assemblerNoVersion,
+ exp(1, "no typetrack value"))
+
+ testProg(t, "#pragma typetrack blah", assemblerNoVersion,
+ exp(1, `bad #pragma typetrack: "blah"`))
+
+ testProg(t, "#pragma typetrack false blah", assemblerNoVersion,
+ exp(1, "unexpected extra tokens: blah"))
+
+ // Currently pragmas don't treat semicolons as newlines. It would probably
+ // be nice to fix this.
+ testProg(t, "#pragma version 5; int 1", assemblerNoVersion,
+ exp(1, "unexpected extra tokens: ; int 1"))
+
+ testProg(t, "#pragma typetrack false; int 1", assemblerNoVersion,
+ exp(1, "unexpected extra tokens: ; int 1"))
}
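// Why "; int 1" appears verbatim in the two errors above: pragma directives
// appear to be processed per source line, before semicolon splitting, so
// everything past the expected arguments is reported as extra tokens. A sketch
// of that check (hypothetical; assumes the token/stringsOf shapes used earlier):
func checkNoExtra(rest []token) error {
	if len(rest) > 0 {
		return fmt.Errorf("unexpected extra tokens: %s", strings.Join(stringsOf(rest), " "))
	}
	return nil
}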
func TestAssemblePragmaVersion(t *testing.T) {
@@ -2345,8 +2447,8 @@ int 1
ops1 := testProg(t, "int 1", 1)
require.Equal(t, ops1.Program, ops.Program)
- testProg(t, text, 0, Expect{1, "version mismatch..."})
- testProg(t, text, 2, Expect{1, "version mismatch..."})
+ testProg(t, text, 0, exp(1, "version mismatch..."))
+ testProg(t, text, 2, exp(1, "version mismatch..."))
testProg(t, text, assemblerNoVersion)
ops = testProg(t, text, assemblerNoVersion)
@@ -2359,8 +2461,8 @@ int 1
ops2 := testProg(t, "int 1", 2)
require.Equal(t, ops2.Program, ops.Program)
- testProg(t, text, 0, Expect{1, "version mismatch..."})
- testProg(t, text, 1, Expect{1, "version mismatch..."})
+ testProg(t, text, 0, exp(1, "version mismatch..."))
+ testProg(t, text, 1, exp(1, "version mismatch..."))
ops = testProg(t, text, assemblerNoVersion)
require.Equal(t, ops2.Program, ops.Program)
@@ -2377,7 +2479,7 @@ len
require.Equal(t, ops2.Program, ops.Program)
testProg(t, "#pragma unk", assemblerNoVersion,
- Expect{1, `unsupported pragma directive: "unk"`})
+ exp(1, `unsupported pragma directive: "unk"`))
}
func TestAssembleConstants(t *testing.T) {
@@ -2451,7 +2553,7 @@ func TestMethodWarning(t *testing.T) {
}
require.Len(t, ops.Warnings, 1)
- require.Contains(t, ops.Warnings[0].Error(), "Invalid ARC-4 ABI method signature for method op")
+ require.ErrorContains(t, ops.Warnings[0], "invalid ARC-4 ABI method signature for method op")
}
}
}
@@ -2460,7 +2562,9 @@ func TestBranchAssemblyTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- text := `
+ for v := uint64(2); v <= AssemblerMaxVersion; v++ {
+ t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
+ text := `
int 0 // current app id [0]
int 1 // key [1, 0]
itob // ["\x01", 0]
@@ -2468,13 +2572,9 @@ func TestBranchAssemblyTypeCheck(t *testing.T) {
pop // [x]
btoi // [n]
`
+ testProg(t, text, v)
- ops := newOpStream(AssemblerMaxVersion)
- err := ops.assemble(text)
- require.NoError(t, err)
- require.Empty(t, ops.Warnings)
-
- text = `
+ text = `
int 0 // current app id [0]
int 1 // key [1, 0]
itob // ["\x01", 0]
@@ -2483,11 +2583,9 @@ func TestBranchAssemblyTypeCheck(t *testing.T) {
flip: // [x]
btoi // [n]
`
-
- ops = newOpStream(AssemblerMaxVersion)
- err = ops.assemble(text)
- require.NoError(t, err)
- require.Empty(t, ops.Warnings)
+ testProg(t, text, v)
+ })
+ }
}
func TestSwapTypeCheck(t *testing.T) {
@@ -2495,29 +2593,29 @@ func TestSwapTypeCheck(t *testing.T) {
t.Parallel()
/* reconfirm that we detect this type error */
- testProg(t, "int 1; byte 0x1234; +", AssemblerMaxVersion, Expect{1, "+ arg 1..."})
+ testProg(t, "int 1; byte 0x1234; +", AssemblerMaxVersion, exp(1, "+ arg 1..."))
/* despite swap, we track types */
- testProg(t, "int 1; byte 0x1234; swap; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
- testProg(t, "byte 0x1234; int 1; swap; +", AssemblerMaxVersion, Expect{1, "+ arg 1..."})
+ testProg(t, "int 1; byte 0x1234; swap; +", AssemblerMaxVersion, exp(1, "+ arg 0..."))
+ testProg(t, "byte 0x1234; int 1; swap; +", AssemblerMaxVersion, exp(1, "+ arg 1..."))
}
func TestDigAsm(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; dig; +", AssemblerMaxVersion, Expect{1, "dig expects 1 immediate..."})
- testProg(t, "int 1; dig junk; +", AssemblerMaxVersion, Expect{1, "dig unable to parse..."})
+ testProg(t, "int 1; dig; +", AssemblerMaxVersion, exp(1, "dig expects 1 immediate..."))
+ testProg(t, "int 1; dig junk; +", AssemblerMaxVersion, exp(1, "dig unable to parse..."))
testProg(t, "int 1; byte 0x1234; int 2; dig 2; +", AssemblerMaxVersion)
testProg(t, "byte 0x32; byte 0x1234; int 2; dig 2; +", AssemblerMaxVersion,
- Expect{1, "+ arg 1..."})
+ exp(1, "+ arg 1..."))
testProg(t, "byte 0x32; byte 0x1234; int 2; dig 3; +", AssemblerMaxVersion,
- Expect{1, "dig 3 expects 4..."})
+ exp(1, "dig 3 expects 4..."))
testProg(t, "int 1; byte 0x1234; int 2; dig 12; +", AssemblerMaxVersion,
- Expect{1, "dig 12 expects 13..."})
+ exp(1, "dig 12 expects 13..."))
// Confirm that digging something out does not ruin our knowledge about the types in the middle
testProg(t, "int 1; byte 0x1234; byte 0x1234; dig 2; dig 3; +; pop; +", AssemblerMaxVersion,
- Expect{1, "+ arg 1..."})
+ exp(1, "+ arg 1..."))
testProg(t, "int 3; pushbytes \"123456\"; int 1; dig 2; substring3", AssemblerMaxVersion)
}
@@ -2525,62 +2623,62 @@ func TestDigAsm(t *testing.T) {
func TestBuryAsm(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; bury; +", AssemblerMaxVersion, Expect{1, "bury expects 1 immediate..."})
- testProg(t, "int 1; bury junk; +", AssemblerMaxVersion, Expect{1, "bury unable to parse..."})
+ testProg(t, "int 1; bury; +", AssemblerMaxVersion, exp(1, "bury expects 1 immediate..."))
+ testProg(t, "int 1; bury junk; +", AssemblerMaxVersion, exp(1, "bury unable to parse..."))
testProg(t, "int 1; byte 0x1234; int 2; bury 1; +", AssemblerMaxVersion) // the 2 replaces the byte string
testProg(t, "int 2; int 2; byte 0x1234; bury 1; +", AssemblerMaxVersion,
- Expect{1, "+ arg 1..."})
+ exp(1, "+ arg 1..."))
testProg(t, "byte 0x32; byte 0x1234; int 2; bury 3; +", AssemblerMaxVersion,
- Expect{1, "bury 3 expects 4..."})
+ exp(1, "bury 3 expects 4..."))
testProg(t, "int 1; byte 0x1234; int 2; bury 12; +", AssemblerMaxVersion,
- Expect{1, "bury 12 expects 13..."})
+ exp(1, "bury 12 expects 13..."))
// We do not lose track of the ints between ToS and bury index
testProg(t, "int 0; int 1; int 2; int 4; bury 3; concat", AssemblerMaxVersion,
- Expect{1, "concat arg 1 wanted type []byte..."})
+ exp(1, "concat arg 1 wanted type []byte..."))
// Even when we are burying into unknown (seems repetitive, but is an easy bug)
testProg(t, "int 0; int 0; b LABEL; LABEL: int 1; int 2; int 4; bury 4; concat", AssemblerMaxVersion,
- Expect{1, "concat arg 1 wanted type []byte..."})
+ exp(1, "concat arg 1 wanted type []byte..."))
}
func TestEqualsTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; byte 0x1234; ==", AssemblerMaxVersion, Expect{1, "== arg 0..."})
- testProg(t, "int 1; byte 0x1234; !=", AssemblerMaxVersion, Expect{1, "!= arg 0..."})
- testProg(t, "byte 0x1234; int 1; ==", AssemblerMaxVersion, Expect{1, "== arg 0..."})
- testProg(t, "byte 0x1234; int 1; !=", AssemblerMaxVersion, Expect{1, "!= arg 0..."})
+ testProg(t, "int 1; byte 0x1234; ==", AssemblerMaxVersion, exp(1, "== arg 0..."))
+ testProg(t, "int 1; byte 0x1234; !=", AssemblerMaxVersion, exp(1, "!= arg 0..."))
+ testProg(t, "byte 0x1234; int 1; ==", AssemblerMaxVersion, exp(1, "== arg 0..."))
+ testProg(t, "byte 0x1234; int 1; !=", AssemblerMaxVersion, exp(1, "!= arg 0..."))
}
func TestDupTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "byte 0x1234; dup; int 1; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
+ testProg(t, "byte 0x1234; dup; int 1; +", AssemblerMaxVersion, exp(1, "+ arg 0..."))
testProg(t, "byte 0x1234; int 1; dup; +", AssemblerMaxVersion)
- testProg(t, "byte 0x1234; int 1; dup2; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
- testProg(t, "int 1; byte 0x1234; dup2; +", AssemblerMaxVersion, Expect{1, "+ arg 1..."})
+ testProg(t, "byte 0x1234; int 1; dup2; +", AssemblerMaxVersion, exp(1, "+ arg 0..."))
+ testProg(t, "int 1; byte 0x1234; dup2; +", AssemblerMaxVersion, exp(1, "+ arg 1..."))
- testProg(t, "byte 0x1234; int 1; dup; dig 1; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
- testProg(t, "int 1; byte 0x1234; dup; dig 1; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
+ testProg(t, "byte 0x1234; int 1; dup; dig 1; len", AssemblerMaxVersion, exp(1, "len arg 0..."))
+ testProg(t, "int 1; byte 0x1234; dup; dig 1; !", AssemblerMaxVersion, exp(1, "! arg 0..."))
- testProg(t, "byte 0x1234; int 1; dup2; dig 2; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
- testProg(t, "int 1; byte 0x1234; dup2; dig 2; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
+ testProg(t, "byte 0x1234; int 1; dup2; dig 2; len", AssemblerMaxVersion, exp(1, "len arg 0..."))
+ testProg(t, "int 1; byte 0x1234; dup2; dig 2; !", AssemblerMaxVersion, exp(1, "! arg 0..."))
}
func TestSelectTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; int 2; int 3; select; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
- testProg(t, "byte 0x1234; byte 0x5678; int 3; select; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
+ testProg(t, "int 1; int 2; int 3; select; len", AssemblerMaxVersion, exp(1, "len arg 0..."))
+ testProg(t, "byte 0x1234; byte 0x5678; int 3; select; !", AssemblerMaxVersion, exp(1, "! arg 0..."))
}
func TestSetBitTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; int 2; int 3; setbit; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
- testProg(t, "byte 0x1234; int 2; int 3; setbit; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
+ testProg(t, "int 1; int 2; int 3; setbit; len", AssemblerMaxVersion, exp(1, "len arg 0..."))
+ testProg(t, "byte 0x1234; int 2; int 3; setbit; !", AssemblerMaxVersion, exp(1, "! arg 0..."))
}
func TestScratchTypeCheck(t *testing.T) {
@@ -2589,13 +2687,15 @@ func TestScratchTypeCheck(t *testing.T) {
// All scratch slots should start as uint64
testProg(t, "load 0; int 1; +", AssemblerMaxVersion)
// Check load and store accurately using the scratch space
- testProg(t, "byte 0x01; store 0; load 0; int 1; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
+ testProg(t, "byte 0x01; store 0; load 0; int 1; +", AssemblerMaxVersion, exp(1, "+ arg 0..."))
// Loads should know the type it's loading if all the slots are the same type
- testProg(t, "int 0; loads; btoi", AssemblerMaxVersion, Expect{1, "btoi arg 0..."})
- // Loads doesn't know the type when slot types vary
- testProg(t, "byte 0x01; store 0; int 1; loads; btoi", AssemblerMaxVersion)
+ testProg(t, "int 0; loads; btoi", AssemblerMaxVersion, exp(1, "btoi arg 0..."))
+	// Loads only knows the type when the slot index is a constant
+ testProg(t, "byte 0x01; store 0; int 1; loads; btoi", AssemblerMaxVersion, exp(1, "btoi arg 0..."))
+	// Loads doesn't know the type if it's the result of some other expression where we lose information
+ testProg(t, "byte 0x01; store 0; load 0; btoi; loads; btoi", AssemblerMaxVersion)
// Stores should only set slots to StackAny if they are not the same type as what is being stored
- testProg(t, "byte 0x01; store 0; int 3; byte 0x01; stores; load 0; int 1; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
+ testProg(t, "byte 0x01; store 0; int 3; byte 0x01; stores; load 0; int 1; +", AssemblerMaxVersion, exp(1, "+ arg 0..."))
// ScratchSpace should reset after hitting label in deadcode
testProg(t, "byte 0x01; store 0; b label1; label1:; load 0; int 1; +", AssemblerMaxVersion)
// But it should reset to StackAny not uint64
@@ -2603,7 +2703,46 @@ func TestScratchTypeCheck(t *testing.T) {
// Callsubs should also reset the scratch space
testProg(t, "callsub A; load 0; btoi; return; A: byte 0x01; store 0; retsub", AssemblerMaxVersion)
// But the scratchspace should still be tracked after the callsub
- testProg(t, "callsub A; int 1; store 0; load 0; btoi; return; A: retsub", AssemblerMaxVersion, Expect{1, "btoi arg 0..."})
+ testProg(t, "callsub A; int 1; store 0; load 0; btoi; return; A: retsub", AssemblerMaxVersion, exp(1, "btoi arg 0..."))
+
+}
+
+func TestScratchBounds(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ os := testProg(t, "int 5; store 1; load 1; return;", AssemblerMaxVersion)
+ sv := os.known.scratchSpace[1]
+ require.Equal(t, sv.AVMType, avmUint64)
+ require.ElementsMatch(t, sv.Bound, static(5))
+
+ os = testProg(t, "int 5; store 1; load 1; int 1; int 1; stores; return;", AssemblerMaxVersion)
+ sv = os.known.scratchSpace[1]
+ require.Equal(t, sv.AVMType, avmUint64)
+ require.ElementsMatch(t, sv.Bound, bound(1, 1))
+
+	// If the stack value used as the slot index is a constant known at
+	// assembly time, we can be sure which slot we need to update
+ os = testProg(t, "int 5; store 1; load 1; int 1; byte 0xff; stores; return;", AssemblerMaxVersion)
+ sv = os.known.scratchSpace[1]
+ require.Equal(t, sv.AVMType, avmBytes)
+ require.ElementsMatch(t, sv.Bound, static(1))
+
+ osv := os.known.scratchSpace[0]
+ require.Equal(t, osv.AVMType, avmUint64)
+ require.ElementsMatch(t, osv.Bound, static(0))
+
+	// Otherwise, we just union every slot's type with the incoming type
+ os = testProg(t, "int 5; store 1; load 1; byte 0xaa; btoi; byte 0xff; stores; return;", AssemblerMaxVersion)
+ sv = os.known.scratchSpace[1]
+ require.Equal(t, sv.AVMType, avmAny)
+ require.ElementsMatch(t, sv.Bound, static(0))
+
+ osv = os.known.scratchSpace[0]
+ require.Equal(t, osv.AVMType, avmAny)
+ require.ElementsMatch(t, osv.Bound, static(0))
+
+ testProg(t, "byte 0xff; store 1; load 1; return", AssemblerMaxVersion, exp(1, "return arg 0 wanted type uint64 ..."))
}
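// The bound/static helpers compared against sv.Bound describe the numeric
// range the assembler has proven for a slot. One plausible shape (hypothetical;
// the real definitions live with the stack-type code):
func bound(min, max uint64) []uint64 { return []uint64{min, max} }
func static(v uint64) []uint64       { return bound(v, v) } // an exactly-known value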
// TestProtoAsm confirms that the assembler will yell at you if you are
@@ -2612,7 +2751,7 @@ func TestScratchTypeCheck(t *testing.T) {
func TestProtoAsm(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "proto 0 0", AssemblerMaxVersion, Expect{1, "proto must be unreachable..."})
+ testProg(t, "proto 0 0", AssemblerMaxVersion, exp(1, "proto must be unreachable..."))
testProg(t, notrack("proto 0 0"), AssemblerMaxVersion)
testProg(t, "b a; int 1; a: proto 0 0", AssemblerMaxVersion) // we could flag a `b` to `proto`
@@ -2636,9 +2775,9 @@ func TestCoverAsm(t *testing.T) {
t.Parallel()
testProg(t, `int 4; byte "john"; int 5; cover 2; pop; +`, AssemblerMaxVersion)
testProg(t, `int 4; byte "ayush"; int 5; cover 1; pop; +`, AssemblerMaxVersion)
- testProg(t, `int 4; byte "john"; int 5; cover 2; +`, AssemblerMaxVersion, Expect{1, "+ arg 1..."})
+ testProg(t, `int 4; byte "john"; int 5; cover 2; +`, AssemblerMaxVersion, exp(1, "+ arg 1..."))
- testProg(t, `int 4; cover junk`, AssemblerMaxVersion, Expect{1, "cover unable to parse n ..."})
+ testProg(t, `int 4; cover junk`, AssemblerMaxVersion, exp(1, "cover unable to parse n ..."))
testProg(t, notrack(`int 4; int 5; cover 0`), AssemblerMaxVersion)
}
@@ -2648,44 +2787,45 @@ func TestUncoverAsm(t *testing.T) {
testProg(t, `int 4; byte "john"; int 5; uncover 2; +`, AssemblerMaxVersion)
testProg(t, `int 4; byte "ayush"; int 5; uncover 1; pop; +`, AssemblerMaxVersion)
testProg(t, `int 1; byte "jj"; byte "ayush"; byte "john"; int 5; uncover 4; +`, AssemblerMaxVersion)
- testProg(t, `int 4; byte "ayush"; int 5; uncover 1; +`, AssemblerMaxVersion, Expect{1, "+ arg 1..."})
+ testProg(t, `int 4; byte "ayush"; int 5; uncover 1; +`, AssemblerMaxVersion, exp(1, "+ arg 1..."))
}
func TestTxTypes(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "itxn_begin; itxn_field Sender", 5, Expect{1, "itxn_field Sender expects 1 stack argument..."})
- testProg(t, "itxn_begin; int 1; itxn_field Sender", 5, Expect{1, "...wanted type []byte got uint64"})
- testProg(t, "itxn_begin; byte 0x56127823; itxn_field Sender", 5)
+ testProg(t, "itxn_begin; itxn_field Sender", 5, exp(1, "itxn_field Sender expects 1 stack argument..."))
+ testProg(t, "itxn_begin; int 1; itxn_field Sender", 5, exp(1, "...wanted type address got 1"))
+ testProg(t, "itxn_begin; byte 0x56127823; itxn_field Sender", 5, exp(1, "...wanted type address got [4]byte"))
+ testProg(t, "itxn_begin; global ZeroAddress; itxn_field Sender", 5)
- testProg(t, "itxn_begin; itxn_field Amount", 5, Expect{1, "itxn_field Amount expects 1 stack argument..."})
- testProg(t, "itxn_begin; byte 0x87123376; itxn_field Amount", 5, Expect{1, "...wanted type uint64 got []byte"})
+ testProg(t, "itxn_begin; itxn_field Amount", 5, exp(1, "itxn_field Amount expects 1 stack argument..."))
+ testProg(t, "itxn_begin; byte 0x87123376; itxn_field Amount", 5, exp(1, "...wanted type uint64 got [4]byte"))
testProg(t, "itxn_begin; int 1; itxn_field Amount", 5)
}
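// The reworked messages above ("wanted type address got [4]byte", "got 1")
// show the checker now reports length-bounded byte types and known constant
// values instead of bare []byte/uint64. A sketch with purely local types (the
// assembler's own representation is not shown in this diff):
func typeName(isBytes bool, length int, constV string) string {
	if constV != "" {
		return constV // a known constant prints as itself, e.g. "got 1"
	}
	if isBytes {
		if length == 32 {
			return "address" // 32-byte values display as an address
		}
		if length >= 0 {
			return fmt.Sprintf("[%d]byte", length)
		}
		return "[]byte"
	}
	return "uint64"
}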
func TestBadInnerFields(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 5, Expect{1, "...is not allowed."})
- testProg(t, "itxn_begin; int 1000; itxn_field FirstValidTime", 5, Expect{1, "...is not allowed."})
- testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 5, Expect{1, "...is not allowed."})
- testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 5, Expect{1, "...is not allowed."})
- testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 5, Expect{1, "...Note field was introduced in v6..."})
- testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 5, Expect{1, "...VotePK field was introduced in v6..."})
- testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 5, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 5, exp(1, "...is not allowed."))
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValidTime", 5, exp(1, "...is not allowed."))
+ testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 5, exp(1, "...is not allowed."))
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 5, exp(1, "...is not allowed."))
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 5, exp(1, "...Note field was introduced in v6..."))
+ testProg(t, "itxn_begin; global ZeroAddress; itxn_field VotePK", 5, exp(1, "...VotePK field was introduced in v6..."))
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 5, exp(1, "...is not allowed."))
- testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 6, Expect{1, "...is not allowed."})
- testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 6, Expect{1, "...is not allowed."})
- testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 6, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 6, exp(1, "...is not allowed."))
+ testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 6, exp(1, "...is not allowed."))
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 6, exp(1, "...is not allowed."))
testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 6)
- testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 6)
- testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 6, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; global ZeroAddress; itxn_field VotePK", 6)
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 6, exp(1, "...is not allowed."))
}
func TestTypeTracking(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "+", LogicVersion, Expect{1, "+ expects 2 stack arguments..."})
+ testProg(t, "+", LogicVersion, exp(1, "+ expects 2 stack arguments..."))
// hitting a label in deadcode starts analyzing again, with unknown stack
testProg(t, "b end; label: +; end: b label", LogicVersion)
@@ -2695,7 +2835,7 @@ func TestTypeTracking(t *testing.T) {
// but we do want to ensure we're not just treating the code after callsub as dead
testProg(t, "callsub A; int 1; concat; return; A: int 1; int 2; retsub", LogicVersion,
- Expect{1, "concat arg 1 wanted..."})
+ exp(1, "concat arg 1 wanted..."))
// retsub deadens code, like any unconditional branch
testProg(t, "callsub A; +; return; A: int 1; int 2; retsub; concat", LogicVersion)
@@ -2711,7 +2851,7 @@ label:
+
confusion:
b label
-`, LogicVersion, Expect{7, "+ arg 0 wanted type uint64..."})
+`, LogicVersion, exp(7, "+ arg 0 wanted type uint64..."))
// Unless that same error is in dead code.
testProg(t, `
@@ -2765,7 +2905,7 @@ done:
concat
#pragma typetrack true
concat
-`, LogicVersion, Expect{5, "concat arg 1 wanted type []byte..."})
+`, LogicVersion, exp(5, "concat arg 1 wanted type []byte..."))
}
func TestMergeProtos(t *testing.T) {
@@ -2799,9 +2939,9 @@ func TestGetSpec(t *testing.T) {
ops.versionedPseudoOps["dummyPseudo"] = make(map[int]OpSpec)
ops.versionedPseudoOps["dummyPseudo"][1] = OpSpec{Name: "b:", Version: AssemblerMaxVersion, Proto: proto("b:")}
ops.versionedPseudoOps["dummyPseudo"][2] = OpSpec{Name: ":", Version: AssemblerMaxVersion}
- _, _, ok := getSpec(ops, "dummyPseudo", []string{})
+ _, _, ok := getSpec(ops, token{str: "dummyPseudo"}, 0)
require.False(t, ok)
- _, _, ok = getSpec(ops, "nonsense", []string{})
+ _, _, ok = getSpec(ops, token{str: "nonsense"}, 0)
require.False(t, ok)
require.Equal(t, 2, len(ops.Errors))
require.Equal(t, "unknown opcode: nonsense", ops.Errors[1].Err.Error())
@@ -2831,8 +2971,8 @@ func TestReplacePseudo(t *testing.T) {
for v := uint64(replaceVersion); v <= AssemblerMaxVersion; v++ {
testProg(t, "byte 0x0000; byte 0x1234; replace 0", v)
testProg(t, "byte 0x0000; int 0; byte 0x1234; replace", v)
- testProg(t, "byte 0x0000; byte 0x1234; replace", v, Expect{1, "replace without immediates expects 3 stack arguments but stack height is 2"})
- testProg(t, "byte 0x0000; int 0; byte 0x1234; replace 0", v, Expect{1, "replace 0 arg 0 wanted type []byte got uint64"})
+ testProg(t, "byte 0x0000; byte 0x1234; replace", v, exp(1, "replace without immediates expects 3 stack arguments but stack height is 2"))
+ testProg(t, "byte 0x0000; int 0; byte 0x1234; replace 0", v, exp(1, "replace 0 arg 0 wanted type []byte got 0"))
}
}
@@ -2886,14 +3026,14 @@ func TestAssembleSwitch(t *testing.T) {
switch label1 label2
label1:
`
- testProg(t, source, AssemblerMaxVersion, NewExpect(3, "reference to undefined label \"label2\""))
+ testProg(t, source, AssemblerMaxVersion, Exp(3, "reference to undefined label \"label2\""))
// fail when target index != uint64
testProg(t, `
byte "fail"
switch label1
labe11:
- `, AssemblerMaxVersion, Expect{3, "switch label1 arg 0 wanted type uint64..."})
+ `, AssemblerMaxVersion, exp(3, "switch label1 arg 0 wanted type uint64..."))
// No labels is pretty degenerate, but ok, I suppose. It's just a no-op
testProg(t, `
@@ -2932,7 +3072,7 @@ int 1
switch %s extra
%s
`, strings.Join(labels, " "), strings.Join(labels, ":\n")+":\n")
- testProg(t, source, AssemblerMaxVersion, Expect{3, "switch cannot take more than 255 labels"})
+ testProg(t, source, AssemblerMaxVersion, exp(3, "switch cannot take more than 255 labels"))
// allow duplicate label reference
source = `
@@ -3149,7 +3289,7 @@ add:
#define c woah hey
int 1
c`,
- AssemblerMaxVersion, Expect{5, "Macro cycle discovered: c -> hey -> x -> d -> c"}, Expect{7, "unknown opcode: c"},
+ AssemblerMaxVersion, exp(5, "macro expansion cycle discovered: c -> hey -> x -> d -> c"), exp(7, "unknown opcode: c"),
)
testProg(t, `
@@ -3159,13 +3299,13 @@ add:
#define c d
int 1
c`,
- AssemblerMaxVersion, Expect{5, "Macro cycle discovered: c -> d -> x -> c"}, Expect{7, "+ expects..."},
+ AssemblerMaxVersion, exp(5, "macro expansion cycle discovered: c -> d -> x -> c"), exp(2, "+ expects...", 12),
)
testProg(t, `
#define X X
int 3`,
- AssemblerMaxVersion, Expect{2, "Macro cycle discovered: X -> X"},
+ AssemblerMaxVersion, exp(2, "macro expansion cycle discovered: X -> X"),
)
// Check that macros names can't be things like named constants, opcodes, etc.
@@ -3180,10 +3320,10 @@ add:
#define + hey // since versioned check is now online, we can error here
int 1`,
assemblerNoVersion,
- Expect{3, "Named constants..."},
- Expect{4, "Named constants..."},
- Expect{6, "Macro names cannot be opcodes: +"},
- Expect{8, "Macro names cannot be opcodes: +"},
+ exp(3, "Named constants..."),
+ exp(4, "Named constants..."),
+ exp(5, "Macro names cannot be opcodes: +"),
+ exp(8, "Macro names cannot be opcodes: +"),
)
// Same check, but this time since no version is given, the versioned check
@@ -3197,10 +3337,10 @@ add:
#define return hi
#define + hey`,
assemblerNoVersion,
- Expect{3, "Named constants..."},
- Expect{4, "Named constants..."},
- Expect{6, "Macro names cannot be opcodes: +"},
- Expect{8, "Macro names cannot be opcodes: +"},
+ exp(3, "Named constants..."),
+ exp(4, "Named constants..."),
+ exp(5, "Macro names cannot be opcodes: +"),
+ exp(8, "Macro names cannot be opcodes: +"),
)
testProg(t, `
@@ -3211,7 +3351,8 @@ add:
#define ApplicationArgs heyyyyy // no error b/c ApplicationArgs is after v1
int 1`,
assemblerNoVersion,
- Expect{4, "Macro names cannot be field names: Sender"}, // error happens once version is known
+ exp(2, "Macro names cannot be field names: Sender"),
+ exp(5, "Macro names cannot be field names: Sender"),
)
// Same check but defaults to AssemblerDefaultVersion instead of pragma
@@ -3222,8 +3363,8 @@ add:
#define Sender helllooooo
#define ApplicationArgs heyyyyy`,
assemblerNoVersion,
- Expect{4, "Macro names cannot be field names: Sender"}, // error happens once version is auto-set
- Expect{5, "Macro names cannot be field names: Sender"}, // and on following line
+ exp(2, "Macro names cannot be field names: Sender"),
+ exp(5, "Macro names cannot be field names: Sender"),
)
// define needs name and body
testLine(t, "#define", AssemblerMaxVersion, "define directive requires a name and body")
@@ -3250,23 +3391,51 @@ add:
int 1
#define coolLabel 1`,
AssemblerMaxVersion,
- Expect{4, "Labels cannot be used as macro names: coolLabel"},
+ exp(4, "Labels cannot be used as macro names: coolLabel"),
)
testProg(t, `
#define coolLabel 1
coolLabel:
int 1`,
AssemblerMaxVersion,
- Expect{3, "Cannot create label with same name as macro: coolLabel"},
+ exp(3, "Cannot create label with same name as macro: coolLabel"),
)
- // Admittedly these two tests are just for coverage
+	// These two tests are just for coverage; they can't actually happen
ops := newOpStream(AssemblerMaxVersion)
- err := define(&ops, []string{"not#define"})
+ err := define(&ops, []token{{str: "not#define"}})
require.EqualError(t, err, "0: invalid syntax: not#define")
- err = pragma(&ops, []string{"not#pragma"})
+ err = pragma(&ops, []token{{str: "not#pragma"}})
require.EqualError(t, err, "0: invalid syntax: not#pragma")
}
+func TestAssembleImmediateRanges(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+	/* Perhaps all of these "unable to parse" errors could be improved to
+	   discuss limits rather than bailing out when the immediate is, in fact,
+	   an integer. */
+
+ testProg(t, "int 1; store 0;", AssemblerMaxVersion)
+ testProg(t, "load 255;", AssemblerMaxVersion)
+
+ testProg(t, "int 1; store -1000;", AssemblerMaxVersion,
+ exp(1, "store unable to parse..."))
+ testProg(t, "load -100;", AssemblerMaxVersion,
+ exp(1, "load unable to parse..."))
+ testProg(t, "int 1; store 256;", AssemblerMaxVersion,
+ exp(1, "store i beyond 255: 256"))
+
+ testProg(t, "frame_dig -1;", AssemblerMaxVersion)
+ testProg(t, "frame_dig 127;", AssemblerMaxVersion)
+ testProg(t, "int 1; frame_bury -128;", AssemblerMaxVersion)
+
+ testProg(t, "frame_dig 128;", AssemblerMaxVersion,
+ exp(1, "frame_dig unable to parse..."))
+ testProg(t, "int 1; frame_bury -129;", AssemblerMaxVersion,
+ exp(1, "frame_bury unable to parse..."))
+}
+
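// The accepted and rejected values above line up with one-byte immediates:
// scratch slot indexes encode as an unsigned byte (hence "store i beyond 255:
// 256"), while frame_dig/frame_bury offsets encode as a signed byte. This is
// inferred from the tests, not quoted from a spec.
const (
	maxScratchIndex = 255  // uint8 immediate for store/load
	minFrameOffset  = -128 // int8 immediate for frame_dig/frame_bury
	maxFrameOffset  = 127
)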
func TestAssembleMatch(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -3277,7 +3446,7 @@ func TestAssembleMatch(t *testing.T) {
match label1 label2
label1:
`
- testProg(t, source, AssemblerMaxVersion, NewExpect(3, "reference to undefined label \"label2\""))
+ testProg(t, source, AssemblerMaxVersion, Exp(3, "reference to undefined label \"label2\""))
// No labels is pretty degenerate, but ok, I suppose. It's just a no-op
testProg(t, `
@@ -3325,7 +3494,7 @@ int 1
match %s extra
%s
`, strings.Join(labels, " "), strings.Join(labels, ":\n")+":\n")
- testProg(t, source, AssemblerMaxVersion, Expect{3, "match cannot take more than 255 labels"})
+ testProg(t, source, AssemblerMaxVersion, exp(3, "match cannot take more than 255 labels"))
// allow duplicate label reference
source = `
@@ -3374,20 +3543,19 @@ func TestAssemblePushConsts(t *testing.T) {
// enforce correct types
source = `pushints "1" "2" "3"`
- testProg(t, source, AssemblerMaxVersion, Expect{1, `strconv.ParseUint: parsing "\"1\"": invalid syntax`})
+ testProg(t, source, AssemblerMaxVersion, exp(1, `strconv.ParseUint: parsing "\"1\"": invalid syntax`))
source = `pushbytess 1 2 3`
- testProg(t, source, AssemblerMaxVersion, Expect{1, "byte arg did not parse: 1"})
+ testProg(t, source, AssemblerMaxVersion, exp(1, "pushbytess arg did not parse: 1"))
source = `pushints 6 4; concat`
- testProg(t, source, AssemblerMaxVersion, Expect{1, "concat arg 1 wanted type []byte got uint64"})
+ testProg(t, source, AssemblerMaxVersion, exp(1, "concat arg 1 wanted type []byte got uint64"))
source = `pushbytess "x" "y"; +`
- testProg(t, source, AssemblerMaxVersion, Expect{1, "+ arg 1 wanted type uint64 got []byte"})
+ testProg(t, source, AssemblerMaxVersion, exp(1, "+ arg 1 wanted type uint64 got []byte"))
}
func TestAssembleEmpty(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- emptyExpect := Expect{0, "Cannot assemble empty program text"}
emptyPrograms := []string{
"",
" ",
@@ -3399,7 +3567,7 @@ func TestAssembleEmpty(t *testing.T) {
for version := uint64(1); version <= AssemblerMaxVersion; version++ {
for _, prog := range emptyPrograms {
- testProg(t, prog, version, emptyExpect)
+ testProg(t, prog, version, exp(0, "Cannot assemble empty program text"))
}
testProg(t, nonEmpty, version)
}
@@ -3409,14 +3577,8 @@ func TestReportMultipleErrors(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- assertWithMsg := func(t *testing.T, expectedOutput string, b bytes.Buffer) {
- if b.String() != expectedOutput {
- t.Errorf("Unexpected output: got %q, want %q", b.String(), expectedOutput)
- }
- }
-
ops := &OpStream{
- Errors: []lineError{
+ Errors: []sourceError{
{Line: 1, Err: errors.New("error 1")},
{Err: errors.New("error 2")},
{Line: 3, Err: errors.New("error 3")},
@@ -3436,7 +3598,7 @@ test.txt: 3: error 3
test.txt: warning 1
test.txt: warning 2
`
- assertWithMsg(t, expected, b)
+ require.Equal(t, expected, b.String())
// Test the case where fname is empty
b.Reset()
@@ -3447,43 +3609,124 @@ test.txt: warning 2
warning 1
warning 2
`
- assertWithMsg(t, expected, b)
+ require.Equal(t, expected, b.String())
// no errors or warnings at all
ops = &OpStream{}
b.Reset()
ops.ReportMultipleErrors("blah blah", &b)
expected = ""
- assertWithMsg(t, expected, b)
+ require.Equal(t, expected, b.String())
// more than 10 errors:
file := "great-file.go"
- les := []lineError{}
+ les := []sourceError{}
expectedStrs := []string{}
for i := 1; i <= 11; i++ {
errS := fmt.Errorf("error %d", i)
- les = append(les, lineError{i, errS})
+ les = append(les, sourceError{i, 5, errS})
if i <= 10 {
- expectedStrs = append(expectedStrs, fmt.Sprintf("%s: %d: %s", file, i, errS))
+ expectedStrs = append(expectedStrs, fmt.Sprintf("%s: %d:5: %s", file, i, errS))
}
}
expected = strings.Join(expectedStrs, "\n") + "\n"
ops = &OpStream{Errors: les}
b.Reset()
ops.ReportMultipleErrors(file, &b)
- assertWithMsg(t, expected, b)
+ require.Equal(t, expected, b.String())
// exactly 1 error + filename
- ops = &OpStream{Errors: []lineError{{42, errors.New("super annoying error")}}}
+ ops = &OpStream{Errors: []sourceError{{42, 0, errors.New("super annoying error")}}}
b.Reset()
ops.ReportMultipleErrors("galaxy.py", &b)
expected = "galaxy.py: 1 error: 42: super annoying error\n"
- assertWithMsg(t, expected, b)
+ require.Equal(t, expected, b.String())
// exactly 1 error w/o filename
- ops = &OpStream{Errors: []lineError{{42, errors.New("super annoying error")}}}
+ ops = &OpStream{Errors: []sourceError{{42, 0, errors.New("super annoying error")}}}
b.Reset()
ops.ReportMultipleErrors("", &b)
expected = "1 error: 42: super annoying error\n"
- assertWithMsg(t, expected, b)
+ require.Equal(t, expected, b.String())
+}
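For reference, the expected strings above imply a position of line:col when a nonzero column is present, and a bare line number otherwise. A hedged sketch of that rendering (the helper and its name are hypothetical; the real sourceError method may differ):

import (
	"fmt"
	"strconv"
)

// formatSourceError mirrors the expected output shapes in this test.
func formatSourceError(fname string, line, col int, err error) string {
	pos := strconv.Itoa(line)
	if col > 0 {
		pos = fmt.Sprintf("%d:%d", line, col) // e.g. "1:5"
	}
	if fname == "" {
		return fmt.Sprintf("%s: %s", pos, err) // "42: super annoying error"
	}
	return fmt.Sprintf("%s: %s: %s", fname, pos, err) // "great-file.go: 1:5: error 1"
}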
+
+// TestDisassembleBadBranch ensures a clean error when a branch has no target.
+func TestDisassembleBadBranch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ for _, br := range []byte{0x40, 0x41, 0x42} {
+ dis, err := Disassemble([]byte{2, br})
+ require.Error(t, err, dis)
+ dis, err = Disassemble([]byte{2, br, 0x01})
+ require.Error(t, err, dis)
+
+ // It would be reasonable to error here, since it's a jump past the end.
+ dis, err = Disassemble([]byte{2, br, 0x00, 0x05})
+ require.NoError(t, err, dis)
+
+ // It would be reasonable to error here, since it's a back jump in v2.
+ dis, err = Disassemble([]byte{2, br, 0xff, 0x02})
+ require.NoError(t, err, dis)
+
+ dis, err = Disassemble([]byte{2, br, 0x00, 0x01, 0x00})
+ require.NoError(t, err)
+ }
+}
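For context: 0x40, 0x41, and 0x42 are the bnz, bz, and b opcodes, each followed by a two-byte big-endian signed offset measured from the end of the instruction. A hedged sketch of the target computation (helper name is illustrative):

import "encoding/binary"

// branchTarget computes where a branch lands; ok is false when the two
// offset bytes are missing, as in the truncated cases above.
func branchTarget(program []byte, pc int) (target int, ok bool) {
	if pc+3 > len(program) {
		return 0, false
	}
	offset := int16(binary.BigEndian.Uint16(program[pc+1 : pc+3]))
	return pc + 3 + int(offset), true
}

Under this reading, {2, br, 0x00, 0x05} targets byte 9 (past the end) and {2, br, 0xff, 0x02} decodes to offset -254 (a back jump); the disassembler tolerates both, as the comments note.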
+
+// TestDisassembleBadSwitch ensures a clean error when a switch ends early
+func TestDisassembleBadSwitch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := `
+ int 1
+ switch label1 label2
+ label1:
+ label2:
+ `
+ ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
+ require.NoError(t, err)
+
+ dis, err := Disassemble(ops.Program)
+ require.NoError(t, err, dis)
+
+ // chop off all the labels, but keep the label count
+ dis, err = Disassemble(ops.Program[:len(ops.Program)-4])
+ require.ErrorContains(t, err, "could not decode labels for switch", dis)
+
+ // chop off before the label count
+ dis, err = Disassemble(ops.Program[:len(ops.Program)-5])
+ require.ErrorContains(t, err, "could not decode label count for switch", dis)
+
+ // chop off half of a label
+ dis, err = Disassemble(ops.Program[:len(ops.Program)-1])
+ require.ErrorContains(t, err, "could not decode labels for switch", dis)
+}
+
+// TestDisassembleBadMatch ensures a clean error when a match ends early
+func TestDisassembleBadMatch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ source := `
+ int 40
+ int 45
+ int 40
+ match label1 label2
+ label1:
+ label2:
+ `
+ ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
+ require.NoError(t, err)
+
+ dis, err := Disassemble(ops.Program)
+ require.NoError(t, err, dis)
+
+ // chop off the label count byte along with the labels themselves
+ dis, err = Disassemble(ops.Program[:len(ops.Program)-5])
+ require.ErrorContains(t, err, "could not decode label count for match", dis)
+
+ dis, err = Disassemble(ops.Program[:len(ops.Program)-1])
+ require.ErrorContains(t, err, "could not decode labels for match", dis)
}
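Both truncation tests exercise the same trailer layout: a one-byte label count followed by that many two-byte big-endian offsets. A minimal decoding sketch under that assumption (names illustrative):

import (
	"encoding/binary"
	"errors"
)

// parseLabels decodes the label block of a switch or match instruction.
func parseLabels(program []byte, pc int) ([]int16, error) {
	if pc >= len(program) {
		return nil, errors.New("could not decode label count")
	}
	count := int(program[pc])
	pc++
	labels := make([]int16, 0, count)
	for i := 0; i < count; i++ {
		if pc+2 > len(program) {
			return nil, errors.New("could not decode labels")
		}
		labels = append(labels, int16(binary.BigEndian.Uint16(program[pc:pc+2])))
		pc += 2
	}
	return labels, nil
}

Chopping one byte off the program leaves a half-decoded offset (the "labels" error); chopping past the count byte loses the count itself, which is why the two truncation points above produce different messages.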
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index df6bd6821..84b230bb8 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -284,7 +284,7 @@ func TestBackwardCompatTEALv1(t *testing.T) {
ep.TxnGroup[0].Lsig.Logic = program
ep.TxnGroup[0].Lsig.Args = [][]byte{data[:], sig[:], pk[:], tx.Sender[:], tx.Note}
- // ensure v1 program runs well on latest TEAL evaluator
+ // ensure v1 program runs well on latest evaluator
require.Equal(t, uint8(1), program[0])
// Cost should stay exactly 2140
@@ -315,7 +315,7 @@ func TestBackwardCompatTEALv1(t *testing.T) {
ep2.Proto.LogicSigMaxCost = 2308
testLogicBytes(t, opsV2.Program, ep2)
- // ensure v0 program runs well on latest TEAL evaluator
+ // ensure v0 program runs well on latest evaluator
ep, tx, _ = makeSampleEnv()
program[0] = 0
sig = c.Sign(Msg{
@@ -464,22 +464,22 @@ func TestBackwardCompatAssemble(t *testing.T) {
// v1 does not allow branching to the last line
// v2 makes such programs legal
t.Parallel()
- source := "int 1; int 1; bnz done; done:"
- t.Run("v=default", func(t *testing.T) {
- t.Parallel()
- testProg(t, source, assemblerNoVersion, Expect{1, "label \"done\" is too far away"})
- })
-
- t.Run("v=default", func(t *testing.T) {
- t.Parallel()
- testProg(t, source, 0, Expect{1, "label \"done\" is too far away"})
- })
-
- t.Run("v=default", func(t *testing.T) {
- t.Parallel()
- testProg(t, source, 1, Expect{1, "label \"done\" is too far away"})
- })
+ // Label is ok, it just can't be branched to
+ source := "int 1; done:"
+ testProg(t, source, assemblerNoVersion)
+ testProg(t, source, 0)
+ testProg(t, source, 1)
+
+ // use multiple lines, so that the reported error line is checked more thoroughly
+ source = `int 1;
+ int 1;
+ bnz done;
+ done:
+`
+ testProg(t, source, assemblerNoVersion, exp(3, "label \"done\" is too far away", 5))
+ testProg(t, source, 0, exp(3, "label \"done\" is too far away", 5))
+ testProg(t, source, 1, exp(3, "label \"done\" is too far away", 5))
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
v := v
diff --git a/data/transactions/logic/box.go b/data/transactions/logic/box.go
index ebc1c25f2..388ae77fe 100644
--- a/data/transactions/logic/box.go
+++ b/data/transactions/logic/box.go
@@ -37,7 +37,7 @@ func (cx *EvalContext) availableBox(name string, operation int, createSize uint6
dirty, ok := cx.available.boxes[boxRef{cx.appID, name}]
if !ok {
- return nil, false, fmt.Errorf("invalid Box reference %v", name)
+ return nil, false, fmt.Errorf("invalid Box reference %#x", name)
}
// Since the box is in cx.available, we know this GetBox call is cheap. It
@@ -150,7 +150,7 @@ func opBoxExtract(cx *EvalContext) error {
return err
}
if !exists {
- return fmt.Errorf("no such box %#v", name)
+ return fmt.Errorf("no such box %#x", name)
}
bytes, err := extractCarefully(contents, start, length)
@@ -178,7 +178,7 @@ func opBoxReplace(cx *EvalContext) error {
return err
}
if !exists {
- return fmt.Errorf("no such box %#v", name)
+ return fmt.Errorf("no such box %#x", name)
}
bytes, err := replaceCarefully(contents, replacement, start)
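The switch from %#v (and %v) to %#x matters because box names are arbitrary bytes: %#v renders Go string syntax, while %#x always yields unambiguous hex. An illustrative comparison:

package main

import "fmt"

func main() {
	name := "B"
	fmt.Printf("no such box %#v\n", name) // no such box "B"
	fmt.Printf("no such box %#x\n", name) // no such box 0x42
	// A rune formats identically, which is why box_test.go below can
	// assert fmt.Sprintf("invalid Box reference %#x", 'B').
	fmt.Printf("invalid Box reference %#x\n", 'B') // invalid Box reference 0x42
}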
diff --git a/data/transactions/logic/box_test.go b/data/transactions/logic/box_test.go
index 77c3adf1b..5f08878a9 100644
--- a/data/transactions/logic/box_test.go
+++ b/data/transactions/logic/box_test.go
@@ -23,7 +23,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/data/transactions/logic"
+ . "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/txntest"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -38,29 +38,29 @@ func TestBoxNewDel(t *testing.T) {
t.Run(fmt.Sprintf("box size=%d", size), func(t *testing.T) {
t.Parallel()
- ep, txn, ledger := logic.MakeSampleEnv()
+ ep, txn, ledger := MakeSampleEnv()
createSelf := fmt.Sprintf(`byte "self"; int %d; box_create;`, size)
createOther := fmt.Sprintf(`byte "other"; int %d; box_create;`, size)
ledger.NewApp(txn.Sender, 888, basics.AppParams{})
- logic.TestApp(t, createSelf, ep)
+ TestApp(t, createSelf, ep)
ledger.DelBoxes(888, "self")
- logic.TestApp(t, createSelf+`assert;`+createSelf+`!`, ep)
+ TestApp(t, createSelf+`assert;`+createSelf+`!`, ep)
ledger.DelBoxes(888, "self")
- logic.TestApp(t, createSelf+`assert;`+createOther, ep)
+ TestApp(t, createSelf+`assert;`+createOther, ep)
ledger.DelBoxes(888, "self")
- logic.TestApp(t, createSelf+`assert; byte "self"; box_del`, ep)
- logic.TestApp(t, `byte "self"; box_del; !`, ep)
- logic.TestApp(t, createSelf+`assert
+ TestApp(t, createSelf+`assert; byte "self"; box_del`, ep)
+ TestApp(t, `byte "self"; box_del; !`, ep)
+ TestApp(t, createSelf+`assert
byte "self"; box_del; assert
byte "self"; box_del; !`, ep)
ledger.DelBoxes(888, "self")
- logic.TestApp(t, fmt.Sprintf(
+ TestApp(t, fmt.Sprintf(
`byte "self"; box_get; !; assert; pop
byte "self"; int %d; bzero; box_put; int 1`, size), ep)
})
@@ -72,40 +72,40 @@ func TestBoxNewBad(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, txn, ledger := logic.MakeSampleEnv()
+ ep, txn, ledger := MakeSampleEnv()
ledger.NewApp(txn.Sender, 888, basics.AppParams{})
- logic.TestApp(t, `byte "self"; int 999; box_create`, ep, "write budget")
+ TestApp(t, `byte "self"; int 999; box_create`, ep, "write budget")
// In test proto, you get 100 I/O budget per boxref
ten := [10]transactions.BoxRef{}
txn.Boxes = append(txn.Boxes, ten[:]...) // write budget is now 11*100 = 1100
- logic.TestApp(t, `byte "self"; int 999; box_create`, ep)
+ TestApp(t, `byte "self"; int 999; box_create`, ep)
ledger.DelBoxes(888, "self")
- logic.TestApp(t, `byte "self"; int 1000; box_create`, ep)
+ TestApp(t, `byte "self"; int 1000; box_create`, ep)
ledger.DelBoxes(888, "self")
- logic.TestApp(t, `byte "self"; int 1001; box_create`, ep, "box size too large")
+ TestApp(t, `byte "self"; int 1001; box_create`, ep, "box size too large")
- logic.TestApp(t, `byte "unknown"; int 1000; box_create`, ep, "invalid Box reference")
+ TestApp(t, `byte "unknown"; int 1000; box_create`, ep, "invalid Box reference")
long := strings.Repeat("x", 65)
txn.Boxes = []transactions.BoxRef{{Name: []byte(long)}}
- logic.TestApp(t, fmt.Sprintf(`byte "%s"; int 1000; box_create`, long), ep, "name too long")
+ TestApp(t, NoTrack(fmt.Sprintf(`byte "%s"; int 1000; box_create`, long)), ep, "name too long")
txn.Boxes = []transactions.BoxRef{{Name: []byte("")}} // irrelevant, zero check comes first anyway
- logic.TestApp(t, `byte ""; int 1000; box_create`, ep, "zero length")
+ TestApp(t, NoTrack(`byte ""; int 1000; box_create`), ep, "zero length")
}
func TestBoxReadWrite(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, txn, ledger := logic.MakeSampleEnv()
+ ep, txn, ledger := MakeSampleEnv()
ledger.NewApp(txn.Sender, 888, basics.AppParams{})
// extract some bytes up to and past the end; confirm the beginning is
// zeros, and confirm when it fails.
- logic.TestApp(t, `byte "self"; int 4; box_create; assert
+ TestApp(t, `byte "self"; int 4; box_create; assert
byte "self"; int 1; int 2; box_extract;
byte 0x0000; ==; assert;
byte "self"; int 1; int 3; box_extract;
@@ -114,30 +114,30 @@ func TestBoxReadWrite(t *testing.T) {
byte 0x00000000; ==; assert;
int 1`, ep)
- logic.TestApp(t, `byte "self"; int 1; int 4; box_extract;
+ TestApp(t, `byte "self"; int 1; int 4; box_extract;
byte 0x00000000; ==`, ep, "extraction end 5")
// Replace some bytes until past the end, confirm when it fails.
- logic.TestApp(t, `byte "self"; int 1; byte 0x3031; box_replace;
+ TestApp(t, `byte "self"; int 1; byte 0x3031; box_replace;
byte "self"; int 0; int 4; box_extract;
byte 0x00303100; ==`, ep)
- logic.TestApp(t, `byte "self"; int 1; byte 0x303132; box_replace;
+ TestApp(t, `byte "self"; int 1; byte 0x303132; box_replace;
byte "self"; int 0; int 4; box_extract;
byte 0x00303132; ==`, ep)
- logic.TestApp(t, `byte "self"; int 1; byte 0x30313233; box_replace;
+ TestApp(t, `byte "self"; int 1; byte 0x30313233; box_replace;
byte "self"; int 0; int 4; box_extract;
byte 0x0030313233; ==`, ep, "replacement end 5")
// Replace with different byte in different place.
- logic.TestApp(t, `byte "self"; int 0; byte 0x4444; box_replace;
+ TestApp(t, `byte "self"; int 0; byte 0x4444; box_replace;
byte "self"; int 0; int 4; box_extract;
byte 0x44443132; ==`, ep)
// All bow down to the God of code coverage!
ledger.DelBoxes(888, "self")
- logic.TestApp(t, `byte "self"; int 1; byte 0x3031; box_replace`, ep,
+ TestApp(t, `byte "self"; int 1; byte 0x3031; box_replace`, ep,
"no such box")
- logic.TestApp(t, `byte "junk"; int 1; byte 0x3031; box_replace`, ep,
+ TestApp(t, `byte "junk"; int 1; byte 0x3031; box_replace`, ep,
"invalid Box reference")
}
@@ -145,15 +145,15 @@ func TestBoxAcrossTxns(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ledger := logic.NewLedger(nil)
+ ledger := NewLedger(nil)
ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
// After creation in first txn, second one can read it (though it's empty)
- logic.TestApps(t, []string{
+ TestApps(t, []string{
`byte "self"; int 64; box_create`,
`byte "self"; int 10; int 4; box_extract; byte 0x00000000; ==`,
}, nil, 8, ledger)
// after creation, modification, the third can read it
- logic.TestApps(t, []string{
+ TestApps(t, []string{
`byte "self"; int 64; box_create`,
`byte "self"; int 2; byte "hi"; box_replace; int 1`,
`byte "self"; int 1; int 4; box_extract; byte 0x00686900; ==`, // "\0hi\0"
@@ -167,24 +167,24 @@ func TestDirtyTracking(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, txn, ledger := logic.MakeSampleEnv()
+ ep, txn, ledger := MakeSampleEnv()
ledger.NewApp(txn.Sender, 888, basics.AppParams{})
- logic.TestApp(t, `byte "self"; int 200; box_create`, ep)
- logic.TestApp(t, `byte "other"; int 201; box_create`, ep, "write budget")
+ TestApp(t, `byte "self"; int 200; box_create`, ep)
+ TestApp(t, `byte "other"; int 201; box_create`, ep, "write budget")
// deleting "self" doesn't give extra write budget to create big "other"
- logic.TestApp(t, `byte "self"; box_del; !; byte "other"; int 201; box_create`, ep,
+ TestApp(t, `byte "self"; box_del; !; byte "other"; int 201; box_create`, ep,
"write budget")
// though it cancels out a creation that happened here
- logic.TestApp(t, `byte "self"; int 200; box_create; assert
+ TestApp(t, `byte "self"; int 200; box_create; assert
byte "self"; box_del; assert
byte "self"; int 200; box_create;
`, ep)
ledger.DelBoxes(888, "self", "other")
// same, but create a different box than deleted
- logic.TestApp(t, `byte "self"; int 200; box_create; assert
+ TestApp(t, `byte "self"; int 200; box_create; assert
byte "self"; box_del; assert
byte "other"; int 200; box_create;
`, ep)
@@ -192,16 +192,16 @@ func TestDirtyTracking(t *testing.T) {
// no funny business by trying to del twice! this case is also interesting
// because the read budget is spent on "other", which is 200, while the
// write budget is spent on "self"
- logic.TestApp(t, `byte "other"; box_len; assert`, ep) // reminder, "other" exists!
- logic.TestApp(t, `byte "self"; int 200; box_create; assert
+ TestApp(t, `byte "other"; box_len; assert`, ep) // reminder, "other" exists!
+ TestApp(t, `byte "self"; int 200; box_create; assert
byte "self"; box_del; assert
byte "self"; box_del; !; assert
byte "self"; int 201; box_create;
`, ep, "write budget")
- logic.TestApp(t, `byte "self"; box_len; !; assert; !`, ep) // "self" was not made
- logic.TestApp(t, `byte "self"; int 200; box_create`, ep) // make it
+ TestApp(t, `byte "self"; box_len; !; assert; !`, ep) // "self" was not made
+ TestApp(t, `byte "self"; int 200; box_create`, ep) // make it
// Now that both exist with size 200, naming both in Boxes causes failure
- logic.TestApp(t, `int 1`, ep, "read budget")
+ TestApp(t, `int 1`, ep, "read budget")
}
@@ -223,10 +223,10 @@ func TestBoxUnavailableWithClearState(t *testing.T) {
name, program := name, program
t.Run(name, func(t *testing.T) {
t.Parallel()
- ep, _, l := logic.MakeSampleEnv()
+ ep, _, l := MakeSampleEnv()
l.NewApp(basics.Address{}, 888, basics.AppParams{})
ep.TxnGroup[0].Txn.OnCompletion = transactions.ClearStateOC
- logic.TestApp(t, program, ep, "boxes may not be accessed from ClearState program")
+ TestApp(t, program, ep, "boxes may not be accessed from ClearState program")
})
}
}
@@ -235,39 +235,39 @@ func TestBoxAvailability(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ledger := logic.NewLedger(nil)
+ ledger := NewLedger(nil)
ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
// B is not available (recall that "self" is set up by MakeSampleEnv, in TestApps)
- logic.TestApps(t, []string{
+ TestApps(t, []string{
`byte "self"; int 64; box_create`,
`byte "B"; int 10; int 4; box_extract; byte 0x00000000; ==`,
- }, nil, 8, ledger, logic.NewExpect(1, "invalid Box reference B"))
+ }, nil, 8, ledger, Exp(1, fmt.Sprintf("invalid Box reference %#x", 'B')))
// B is available if indexed by 0 in tx[1].Boxes
- group := logic.MakeSampleTxnGroup(logic.MakeSampleTxn(), txntest.Txn{
+ group := MakeSampleTxnGroup(MakeSampleTxn(), txntest.Txn{
Type: "appl",
ApplicationID: 10000,
Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("B")}},
}.SignedTxn())
group[0].Txn.Type = protocol.ApplicationCallTx
- logic.TestApps(t, []string{
+ TestApps(t, []string{
`byte "self"; int 64; box_create`,
`byte "B"; int 10; int 4; box_extract; byte 0x00000000; ==`,
- }, group, 8, ledger, logic.NewExpect(1, "no such box"))
+ }, group, 8, ledger, Exp(1, "no such box"))
// B is available if listed by appId in tx[1].Boxes
- group = logic.MakeSampleTxnGroup(logic.MakeSampleTxn(), txntest.Txn{
+ group = MakeSampleTxnGroup(MakeSampleTxn(), txntest.Txn{
Type: "appl",
ApplicationID: 10000,
ForeignApps: []basics.AppIndex{10000},
Boxes: []transactions.BoxRef{{Index: 1, Name: []byte("B")}},
}.SignedTxn())
group[0].Txn.Type = protocol.ApplicationCallTx
- logic.TestApps(t, []string{
+ TestApps(t, []string{
`byte "self"; int 64; box_create`,
`byte "B"; int 10; int 4; box_extract; byte 0x00000000; ==`,
- }, group, 8, ledger, logic.NewExpect(1, "no such box"))
+ }, group, 8, ledger, Exp(1, "no such box"))
}
func TestBoxReadBudget(t *testing.T) {
@@ -277,7 +277,7 @@ func TestBoxReadBudget(t *testing.T) {
appID := basics.AppIndex(888)
appAddr := appID.Address()
- ep, txn, ledger := logic.MakeSampleEnv()
+ ep, txn, ledger := MakeSampleEnv()
ledger.NewApp(basics.Address{}, appID, basics.AppParams{})
// Sample txn has two box refs, so read budget is 2*100
@@ -287,97 +287,97 @@ func TestBoxReadBudget(t *testing.T) {
ledger.NewBox(appID, "third", make([]byte, 100), appAddr)
// Right at budget
- logic.TestApp(t, `byte "self"; box_len; assert; byte "other"; box_len; assert; ==`, ep)
+ TestApp(t, `byte "self"; box_len; assert; byte "other"; box_len; assert; ==`, ep)
// With three box refs, read budget is now 3*100
txn.Boxes = append(txn.Boxes, transactions.BoxRef{Name: []byte("third")})
- logic.TestApp(t, `byte "self"; box_len; assert; byte "third"; box_len; assert; ==`, ep)
+ TestApp(t, `byte "self"; box_len; assert; byte "third"; box_len; assert; ==`, ep)
// Increase "third" box size to 101
ledger.DelBox(appID, "third", appAddr)
ledger.NewBox(appID, "third", make([]byte, 101), appAddr)
// Budget exceeded
- logic.TestApp(t, `byte "self"; box_len; assert; byte "third"; box_len; assert; ==`, ep, "box read budget (300) exceeded")
+ TestApp(t, `byte "self"; box_len; assert; byte "third"; box_len; assert; ==`, ep, "box read budget (300) exceeded")
// Still exceeded if we don't touch the boxes
- logic.TestApp(t, `int 1`, ep, "box read budget (300) exceeded")
+ TestApp(t, `int 1`, ep, "box read budget (300) exceeded")
// Still exceeded with one box ref
txn.Boxes = txn.Boxes[2:]
- logic.TestApp(t, `byte "third"; box_len; assert; int 101; ==`, ep, "box read budget (100) exceeded")
+ TestApp(t, `byte "third"; box_len; assert; int 101; ==`, ep, "box read budget (100) exceeded")
// But not with two
txn.Boxes = append(txn.Boxes, transactions.BoxRef{})
- logic.TestApp(t, `byte "third"; box_len; assert; int 101; ==`, ep)
+ TestApp(t, `byte "third"; box_len; assert; int 101; ==`, ep)
}
func TestBoxWriteBudget(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, _, ledger := logic.MakeSampleEnv()
+ ep, _, ledger := MakeSampleEnv()
ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
// Sample tx[0] has two box refs, so write budget is 2*100
// Test simple use of one box, less than, equal, or over budget
- logic.TestApp(t, `byte "self"; int 4; box_create`, ep)
- logic.TestApp(t, `byte "self"; box_del; assert
+ TestApp(t, `byte "self"; int 4; box_create`, ep)
+ TestApp(t, `byte "self"; box_del; assert
byte "self"; int 199; box_create`, ep)
- logic.TestApp(t, `byte "self"; box_del; assert
+ TestApp(t, `byte "self"; box_del; assert
byte "self"; int 200; box_create`, ep)
- logic.TestApp(t, `byte "self"; box_del; assert
+ TestApp(t, `byte "self"; box_del; assert
byte "self"; int 201; box_create`, ep, "write budget (200) exceeded")
// Test interplay of two different boxes being created
- logic.TestApp(t, `byte "self"; int 4; box_create; assert
+ TestApp(t, `byte "self"; int 4; box_create; assert
byte "other"; int 4; box_create`, ep)
- logic.TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del; assert
+ TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del; assert
byte "self"; int 4; box_create; assert;
byte "other"; int 196; box_create`, ep)
- logic.TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del; assert
+ TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del; assert
byte "self"; int 6; box_create; assert
byte "other"; int 196; box_create`, ep,
"write budget (200) exceeded")
ledger.DelBoxes(888, "other")
- logic.TestApp(t, `byte "self"; box_del; assert
+ TestApp(t, `byte "self"; box_del; assert
byte "self"; int 6; box_create; assert
byte "other"; int 196; box_create; assert // fails to create
byte "self"; box_del;`, ep, "write budget (200) exceeded")
- logic.TestApp(t, `byte "other"; int 196; box_create`, ep)
- logic.TestApp(t, `byte "self"; box_del`, ep, "read budget") // 6 + 196 > 200
- logic.TestApp(t, `byte "junk"; box_del`, ep, "read budget") // fails before invalid "junk" is noticed
+ TestApp(t, `byte "other"; int 196; box_create`, ep)
+ TestApp(t, `byte "self"; box_del`, ep, "read budget") // 6 + 196 > 200
+ TestApp(t, `byte "junk"; box_del`, ep, "read budget") // fails before invalid "junk" is noticed
ledger.DelBoxes(888, "self", "other")
- logic.TestApp(t, `byte "junk"; box_del`, ep, "invalid Box reference")
+ TestApp(t, `byte "junk"; box_del`, ep, "invalid Box reference")
// Create two boxes, that sum to over budget, then test trying to use them together
- logic.TestApp(t, `byte "self"; int 101; box_create`, ep)
- logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ TestApp(t, `byte "self"; int 101; box_create`, ep)
+ TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
byte "other"; int 101; box_create`, ep, "write budget (200) exceeded")
- logic.TestApp(t, `byte "other"; int 101; box_create`, ep)
- logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ TestApp(t, `byte "other"; int 101; box_create`, ep)
+ TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
byte "other"; int 1; byte 0x3333; box_replace;
int 1`, ep, "read budget (200) exceeded")
ledger.DelBoxes(888, "other")
- logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
byte "other"; int 10; box_create`, ep)
// They're now small enough to read and write
- logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
byte "other"; int 1; byte 0x3333; box_replace;
int 1`, ep)
// writing twice is no problem (even though it's the big one)
- logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
byte "self"; int 50; byte 0x3333; box_replace;
byte "other"; int 1; byte 0x3333; box_replace;
int 1`, ep)
- logic.TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del`, ep) // cleanup
+ TestApp(t, `byte "self"; box_del; assert; byte "other"; box_del`, ep) // cleanup
}
@@ -386,33 +386,33 @@ func TestWriteBudgetPut(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, _, ledger := logic.MakeSampleEnv()
+ ep, _, ledger := MakeSampleEnv()
ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
// Sample tx[0] has two box refs, so write budget is 2*100
// Test simple use of one box
- logic.TestApp(t, `byte "self"; int 200; box_create`, ep) // equal to budget
- logic.TestApp(t, `byte "self"; box_del`, ep)
- logic.TestApp(t, `byte "self"; int 201; box_create`, ep, // 1 over budget
+ TestApp(t, `byte "self"; int 200; box_create`, ep) // equal to budget
+ TestApp(t, `byte "self"; box_del`, ep)
+ TestApp(t, `byte "self"; int 201; box_create`, ep, // 1 over budget
"write budget")
// More complicated versions that use 1 or more 150 byte boxes, so one is ok, two is over
- logic.TestApp(t, `byte "self"; int 150; box_create`, ep)
- logic.TestApp(t, `byte "self"; int 150; bzero; box_put; int 1`, ep)
- logic.TestApp(t, `byte "self"; int 149; bzero; byte "x"; concat; box_put; int 1`, ep)
+ TestApp(t, `byte "self"; int 150; box_create`, ep)
+ TestApp(t, `byte "self"; int 150; bzero; box_put; int 1`, ep)
+ TestApp(t, `byte "self"; int 149; bzero; byte "x"; concat; box_put; int 1`, ep)
// puts to the same name don't go over budget (although we don't optimize
// away puts with the same content, this test uses different contents just
// to be sure).
- logic.TestApp(t, `byte "self"; int 150; bzero; box_put;
+ TestApp(t, `byte "self"; int 150; bzero; box_put;
byte "self"; int 149; bzero; byte "x"; concat; box_put; int 1`, ep)
// puts to different names do
- logic.TestApp(t, `byte "self"; int 150; bzero; box_put;
+ TestApp(t, `byte "self"; int 150; bzero; box_put;
byte "other"; int 149; bzero; byte "x"; concat; box_put; int 1`, ep,
"write budget")
// testing a regression: ensure box_put does not double debit when creating
- logic.TestApp(t, `byte "self"; int 150; bzero; box_put; int 1`, ep)
+ TestApp(t, `byte "self"; int 150; bzero; box_put; int 1`, ep)
}
// TestBoxRepeatedCreate ensures that app is not charged write budget for
@@ -421,19 +421,19 @@ func TestBoxRepeatedCreate(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, _, ledger := logic.MakeSampleEnv()
+ ep, _, ledger := MakeSampleEnv()
ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
// Sample tx[0] has two box refs, so write budget is 2*100
- logic.TestApp(t, `byte "self"; int 201; box_create`, ep,
+ TestApp(t, `byte "self"; int 201; box_create`, ep,
"write budget")
- logic.TestApp(t, `byte "self"; int 200; box_create`, ep)
- logic.TestApp(t, `byte "self"; int 200; box_create; !; assert // does not actually create
+ TestApp(t, `byte "self"; int 200; box_create`, ep)
+ TestApp(t, `byte "self"; int 200; box_create; !; assert // does not actually create
byte "other"; int 200; box_create; assert // does create, and budget should be enough
int 1`, ep)
ledger.DelBoxes(888, "self", "other")
- logic.TestApp(t, `byte "other"; int 200; box_create; assert
+ TestApp(t, `byte "other"; int 200; box_create; assert
byte "other"; box_del; assert
byte "other"; int 200; box_create`, ep)
@@ -443,29 +443,29 @@ func TestIOBudgetGrow(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, txn, ledger := logic.MakeSampleEnv()
+ ep, txn, ledger := MakeSampleEnv()
ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
ledger.CreateBox(888, "self", 101)
ledger.CreateBox(888, "other", 101)
- logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
byte "other"; int 1; byte 0x3333; box_replace;
int 1`, ep, "read budget (200) exceeded")
txn.Boxes = append(txn.Boxes, transactions.BoxRef{})
// Since we added an empty BoxRef, we can read > 200.
- logic.TestApp(t, `byte "self"; int 1; int 7; box_extract; pop;
+ TestApp(t, `byte "self"; int 1; int 7; box_extract; pop;
byte "other"; int 1; int 7; box_extract; pop;
int 1`, ep)
// Add write, for that matter
- logic.TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
+ TestApp(t, `byte "self"; int 1; byte 0x3333; box_replace;
byte "other"; int 1; byte 0x3333; box_replace;
int 1`, ep)
txn.Boxes = append(txn.Boxes, transactions.BoxRef{Name: []byte("another")})
// Here we read 202, and write a very different 350 (since we now have 4 brs)
- logic.TestApp(t, `byte "self"; int 1; int 7; box_extract; pop;
+ TestApp(t, `byte "self"; int 1; int 7; box_extract; pop;
byte "other"; int 1; int 7; box_extract; pop;
byte "another"; int 350; box_create`, ep)
}
@@ -474,37 +474,37 @@ func TestConveniences(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, _, ledger := logic.MakeSampleEnv()
+ ep, _, ledger := MakeSampleEnv()
ledger.NewApp(basics.Address{}, 888, basics.AppParams{})
// box_get of a new name reports !exists, and returns 0 length bytes.
- logic.TestApp(t, `byte "self"; box_get; !; assert; len; !`, ep)
+ TestApp(t, `byte "self"; box_get; !; assert; len; !`, ep)
// box_len of a new name reports !exists, and returns 0 as the length
- logic.TestApp(t, `byte "self"; box_len; !; assert; !`, ep)
+ TestApp(t, `byte "self"; box_len; !; assert; !`, ep)
// box_put creates the box with contents provided
- logic.TestApp(t, `byte "self"; byte 0x3132; box_put;
+ TestApp(t, `byte "self"; byte 0x3132; box_put;
byte "self"; box_len; assert; int 2; ==; assert
byte "self"; box_get; assert; byte 0x3132; ==`, ep)
// box_put fails if box exists and is wrong size (self exists from last test)
- logic.TestApp(t, `byte "self"; byte 0x313233; box_put; int 1`, ep,
+ TestApp(t, `byte "self"; byte 0x313233; box_put; int 1`, ep,
"box_put wrong size")
ledger.DelBoxes(888, "self")
// put and get can interact with created boxes
- logic.TestApp(t, `byte "self"; int 3; box_create`, ep)
- logic.TestApp(t, `byte "self"; box_get; assert; byte 0x000000; ==`, ep)
- logic.TestApp(t, `byte "self"; byte 0xAABBCC; box_put; int 1`, ep)
- logic.TestApp(t, `byte "self"; int 1; byte 0xDDEE; box_replace; int 1`, ep)
- logic.TestApp(t, `byte "self"; box_get; assert; byte 0xAADDEE; ==`, ep)
+ TestApp(t, `byte "self"; int 3; box_create`, ep)
+ TestApp(t, `byte "self"; box_get; assert; byte 0x000000; ==`, ep)
+ TestApp(t, `byte "self"; byte 0xAABBCC; box_put; int 1`, ep)
+ TestApp(t, `byte "self"; int 1; byte 0xDDEE; box_replace; int 1`, ep)
+ TestApp(t, `byte "self"; box_get; assert; byte 0xAADDEE; ==`, ep)
ledger.DelBoxes(888, "self")
// box_get panics if the box is too big (for TEAL, or for proto)
ep.Proto.MaxBoxSize = 5000
ep.Proto.BytesPerBoxReference = 5000 // avoid write budget error
- logic.TestApp(t, `byte "self"; int 4098; box_create; assert; // bigger than maxStringSize
+ TestApp(t, `byte "self"; int 4098; box_create; assert; // bigger than maxStringSize
byte "self"; box_get; assert; len`, ep,
"box_get produced a too big")
}
@@ -529,9 +529,9 @@ func TestEarlyPanics(t *testing.T) {
name, program := name, program
t.Run(name+"/zero", func(t *testing.T) {
t.Parallel()
- ep, _, l := logic.MakeSampleEnv()
+ ep, _, l := MakeSampleEnv()
l.NewApp(basics.Address{}, 888, basics.AppParams{})
- logic.TestApp(t, fmt.Sprintf(program, ""), ep, "zero length")
+ TestApp(t, NoTrack(fmt.Sprintf(program, "")), ep, "zero length")
})
}
@@ -540,9 +540,9 @@ func TestEarlyPanics(t *testing.T) {
name, program := name, program
t.Run(name+"/long", func(t *testing.T) {
t.Parallel()
- ep, _, l := logic.MakeSampleEnv()
+ ep, _, l := MakeSampleEnv()
l.NewApp(basics.Address{}, 888, basics.AppParams{})
- logic.TestApp(t, fmt.Sprintf(program, big), ep, "name too long")
+ TestApp(t, NoTrack(fmt.Sprintf(program, big)), ep, "name too long")
})
}
@@ -552,18 +552,18 @@ func TestBoxTotals(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, txn, ledger := logic.MakeSampleEnv()
-
- ledger.NewApp(txn.Sender, 888, basics.AppParams{})
- // The SENDER certainly has no boxes (but does exist)
- logic.TestApp(t, `int 0; acct_params_get AcctTotalBoxes; pop; !`, ep)
- // Nor does the app account, to start
- logic.TestApp(t, `int 888; app_params_get AppAddress; assert;
- acct_params_get AcctTotalBoxes; pop; !; `, ep)
- // Create a 31 byte box with a 4 byte name
- logic.TestApp(t, `byte "self"; int 31; box_create`, ep)
- logic.TestApp(t, `int 888; app_params_get AppAddress; assert;
- acct_params_get AcctTotalBoxes; pop; int 1; ==`, ep)
- logic.TestApp(t, `int 888; app_params_get AppAddress; assert;
- acct_params_get AcctTotalBoxBytes; pop; int 35; ==`, ep)
+ TestLogicRange(t, 8, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ ledger.NewApp(tx.Sender, 888, basics.AppParams{})
+ // The SENDER certainly has no boxes (but does exist)
+ TestApp(t, `txn Sender; acct_params_get AcctTotalBoxes; pop; !`, ep)
+ // Nor does the app account, to start
+ TestApp(t, `int 888; app_params_get AppAddress; assert;
+ acct_params_get AcctTotalBoxes; pop; !; `, ep)
+ // Create a 31 byte box with a 4 byte name
+ TestApp(t, `byte "self"; int 31; box_create`, ep)
+ TestApp(t, `int 888; app_params_get AppAddress; assert;
+ acct_params_get AcctTotalBoxes; pop; int 1; ==`, ep)
+ TestApp(t, `int 888; app_params_get AppAddress; assert;
+ acct_params_get AcctTotalBoxBytes; pop; int 35; ==`, ep)
+ })
}
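The 35 asserted for AcctTotalBoxBytes is simply name length plus box size, since box byte totals count both:

fmt.Println(len("self") + 31) // 35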
diff --git a/data/transactions/logic/debugger.go b/data/transactions/logic/debugger.go
index ac2bcab7d..bc10ca79d 100644
--- a/data/transactions/logic/debugger.go
+++ b/data/transactions/logic/debugger.go
@@ -29,6 +29,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
@@ -73,7 +74,7 @@ func (a *debuggerEvalTracerAdaptor) BeforeTxnGroup(ep *EvalParams) {
}
// AfterTxnGroup updates inner txn depth
-func (a *debuggerEvalTracerAdaptor) AfterTxnGroup(ep *EvalParams, evalError error) {
+func (a *debuggerEvalTracerAdaptor) AfterTxnGroup(ep *EvalParams, deltas *ledgercore.StateDelta, evalError error) {
a.txnDepth--
}
@@ -183,7 +184,7 @@ func makeDebugState(cx *EvalContext) *DebugState {
if err != nil {
sv = stackValue{Bytes: []byte(err.Error())}
}
- globals[fs.field] = stackValueToTealValue(&sv)
+ globals[fs.field] = sv.toEncodedTealValue()
}
ds.Globals = globals
@@ -243,22 +244,13 @@ func (d *DebugState) PCToLine(pc int) int {
return len(strings.Split(d.Disassembly[:offset], "\n")) - one
}
-func stackValueToTealValue(sv *stackValue) basics.TealValue {
- tv := sv.toTealValue()
- return basics.TealValue{
- Type: tv.Type,
- Bytes: base64.StdEncoding.EncodeToString([]byte(tv.Bytes)),
- Uint: tv.Uint,
- }
-}
-
-// valueDeltaToValueDelta converts delta's bytes to base64 in a new struct
-func valueDeltaToValueDelta(vd *basics.ValueDelta) basics.ValueDelta {
- return basics.ValueDelta{
- Action: vd.Action,
- Bytes: base64.StdEncoding.EncodeToString([]byte(vd.Bytes)),
- Uint: vd.Uint,
+// toEncodedTealValue converts stackValue to basics.TealValue, with the Bytes
+// field b64 encoded, so it is suitable for conversion to JSON.
+func (sv stackValue) toEncodedTealValue() basics.TealValue {
+ if sv.avmType() == avmBytes {
+ return basics.TealValue{Type: basics.TealBytesType, Bytes: base64.StdEncoding.EncodeToString(sv.Bytes)}
}
+ return basics.TealValue{Type: basics.TealUintType, Uint: sv.Uint}
}
// parseCallStack initializes an array of CallFrame objects from the raw
@@ -295,12 +287,12 @@ func (a *debuggerEvalTracerAdaptor) refreshDebugState(cx *EvalContext, evalError
stack := make([]basics.TealValue, len(cx.stack))
for i, sv := range cx.stack {
- stack[i] = stackValueToTealValue(&sv)
+ stack[i] = sv.toEncodedTealValue()
}
scratch := make([]basics.TealValue, len(cx.scratch))
for i, sv := range cx.scratch {
- scratch[i] = stackValueToTealValue(&sv)
+ scratch[i] = sv.toEncodedTealValue()
}
ds.Stack = stack
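A hedged illustration of the difference between the two conversions (values assume standard base64):

sv := stackValue{Bytes: []byte("hi")}
fmt.Println(sv.toTealValue().Bytes)        // hi   (raw bytes)
fmt.Println(sv.toEncodedTealValue().Bytes) // aGk= (base64, JSON-safe)

num := stackValue{Uint: 7}
fmt.Println(num.toEncodedTealValue().Uint) // 7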
diff --git a/data/transactions/logic/debugger_test.go b/data/transactions/logic/debugger_test.go
index 1303e5713..283bcaa6d 100644
--- a/data/transactions/logic/debugger_test.go
+++ b/data/transactions/logic/debugger_test.go
@@ -17,10 +17,8 @@
package logic
import (
- "encoding/base64"
"testing"
- "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
)
@@ -63,22 +61,6 @@ func TestLineToPC(t *testing.T) {
require.Equal(t, 0, pc)
}
-func TestValueDeltaToValueDelta(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- vDelta := basics.ValueDelta{
- Action: basics.SetUintAction,
- Bytes: "some string",
- Uint: uint64(0xffffffff),
- }
- ans := valueDeltaToValueDelta(&vDelta)
- require.Equal(t, vDelta.Action, ans.Action)
- require.NotEqual(t, vDelta.Bytes, ans.Bytes)
- require.Equal(t, base64.StdEncoding.EncodeToString([]byte(vDelta.Bytes)), ans.Bytes)
- require.Equal(t, vDelta.Uint, ans.Uint)
-}
-
const testCallStackProgram string = `intcblock 1
callsub label1
intc_0
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index 31a0412fa..e4e524880 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -17,6 +17,8 @@
package logic
import (
+ "strings"
+
"github.com/algorand/go-algorand/protocol"
)
@@ -219,84 +221,113 @@ func OpDoc(opName string) string {
return opDocByName[opName]
}
-var opcodeImmediateNotes = map[string]string{
- "intcblock": "{varuint count} [{varuint value}, ...]",
- "intc": "{uint8 int constant index}",
- "pushint": "{varuint int}",
- "pushints": "{varuint count} [{varuint value}, ...]",
- "bytecblock": "{varuint count} [({varuint length} bytes), ...]",
- "bytec": "{uint8 byte constant index}",
- "pushbytes": "{varuint length} {bytes}",
- "pushbytess": "{varuint count} [({varuint length} bytes), ...]",
-
- "arg": "{uint8 arg index}",
- "global": "{uint8 global field index}",
-
- "txn": "{uint8 transaction field index}",
- "gtxn": "{uint8 transaction group index} {uint8 transaction field index}",
- "gtxns": "{uint8 transaction field index}",
- "txna": "{uint8 transaction field index} {uint8 transaction field array index}",
- "gtxna": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}",
- "gtxnsa": "{uint8 transaction field index} {uint8 transaction field array index}",
- "txnas": "{uint8 transaction field index}",
- "gtxnas": "{uint8 transaction group index} {uint8 transaction field index}",
- "gtxnsas": "{uint8 transaction field index}",
-
- "bnz": "{int16 branch offset, big-endian}",
- "bz": "{int16 branch offset, big-endian}",
- "b": "{int16 branch offset, big-endian}",
- "callsub": "{int16 branch offset, big-endian}",
-
- "load": "{uint8 position in scratch space to load from}",
- "store": "{uint8 position in scratch space to store to}",
- "gload": "{uint8 transaction group index} {uint8 position in scratch space to load from}",
- "gloads": "{uint8 position in scratch space to load from}",
- "gaid": "{uint8 transaction group index}",
-
- "substring": "{uint8 start position} {uint8 end position}",
- "extract": "{uint8 start position} {uint8 length}",
- "replace2": "{uint8 start position}",
- "dig": "{uint8 depth}",
- "bury": "{uint8 depth}",
- "cover": "{uint8 depth}",
- "uncover": "{uint8 depth}",
-
- "asset_holding_get": "{uint8 asset holding field index}",
- "asset_params_get": "{uint8 asset params field index}",
- "app_params_get": "{uint8 app params field index}",
- "acct_params_get": "{uint8 account params field index}",
-
- "itxn_field": "{uint8 transaction field index}",
- "itxn": "{uint8 transaction field index}",
- "itxna": "{uint8 transaction field index} {uint8 transaction field array index}",
- "itxnas": "{uint8 transaction field index}",
- "gitxn": "{uint8 transaction group index} {uint8 transaction field index}",
- "gitxna": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}",
- "gitxnas": "{uint8 transaction group index} {uint8 transaction field index}",
-
- "ecdsa_verify": "{uint8 curve index}",
- "ecdsa_pk_decompress": "{uint8 curve index}",
- "ecdsa_pk_recover": "{uint8 curve index}",
-
- "base64_decode": "{uint8 encoding index}",
- "json_ref": "{uint8 return type index}",
-
- "vrf_verify": "{uint8 parameters index}",
- "block": "{uint8 block field index}",
-
- "switch": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
- "match": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
-
- "proto": "{uint8 arguments} {uint8 return values}",
- "frame_dig": "{int8 frame slot}",
- "frame_bury": "{int8 frame slot}",
- "popn": "{uint8 stack depth}",
- "dupn": "{uint8 copy count}",
+var opcodeImmediateNotes = map[string][]string{
+ "intcblock": {"a block of int constant values"},
+ "intc": {"an index in the intcblock"},
+ "pushint": {"an int constant"},
+ "pushints": {"a list of int constants"},
+ "bytecblock": {"a block of byte constant values"},
+ "bytec": {"an index in the bytecblock"},
+ "pushbytes": {"a byte constant"},
+ "pushbytess": {"a list of byte constants"},
+
+ "arg": {"an arg index"},
+ "global": {"a global field index"},
+
+ "txn": {"transaction field index"},
+ "gtxn": {"transaction group index", "transaction field index"},
+ "gtxns": {"transaction field index"},
+ "txna": {"transaction field index", "transaction field array index"},
+ "gtxna": {"transaction group index", "transaction field index", "transaction field array index"},
+ "gtxnsa": {"transaction field index", "transaction field array index"},
+ "txnas": {"transaction field index"},
+ "gtxnas": {"transaction group index", "transaction field index"},
+ "gtxnsas": {"transaction field index"},
+
+ "bnz": {"branch offset"},
+ "bz": {"branch offset"},
+ "b": {"branch offset"},
+ "callsub": {"branch offset"},
+
+ "load": {"position in scratch space to load from"},
+ "store": {"position in scratch space to store to"},
+ "gload": {"transaction group index", "position in scratch space to load from"},
+ "gloads": {"position in scratch space to load from"},
+ "gaid": {"transaction group index"},
+
+ "substring": {"start position", "end position"},
+ "extract": {"start position", "length"},
+ "replace2": {"start position"},
+ "dig": {"depth"},
+ "bury": {"depth"},
+ "cover": {"depth"},
+ "uncover": {"depth"},
+
+ "asset_holding_get": {"asset holding field index"},
+ "asset_params_get": {"asset params field index"},
+ "app_params_get": {"app params field index"},
+ "acct_params_get": {"account params field index"},
+
+ "itxn_field": {"transaction field index"},
+ "itxn": {"transaction field index"},
+ "itxna": {"transaction field index", "a transaction field array index"},
+ "itxnas": {"transaction field index"},
+ "gitxn": {"transaction group index", "transaction field index"},
+ "gitxna": {"transaction group index", "transaction field index", "transaction field array index"},
+ "gitxnas": {"transaction group index", "transaction field index"},
+
+ "ecdsa_verify": {"curve index"},
+ "ecdsa_pk_decompress": {"curve index"},
+ "ecdsa_pk_recover": {"curve index"},
+
+ "base64_decode": {"encoding index"},
+ "json_ref": {"return type index"},
+
+ "vrf_verify": {" parameters index"},
+ "block": {" block field index"},
+
+ "switch": {"list of labels"},
+ "match": {"list of labels"},
+
+ "proto": {"number of arguments", "number of return values"},
+ "frame_dig": {"frame slot"},
+ "frame_bury": {"frame slot"},
+ "popn": {"stack depth"},
+ "dupn": {"copy count"},
+}
+
+// OpImmediateDetails contains information about an immediate argument for
+// a given opcode, combining OpSpec details with the extra note in
+// the opcodeImmediateNotes map
+type OpImmediateDetails struct {
+ Comment string `json:",omitempty"`
+ Encoding string `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Reference string `json:",omitempty"`
}
-// OpImmediateNote returns a short string about immediate data which follows the op byte
-func OpImmediateNote(opName string) string {
- return opcodeImmediateNotes[opName]
+// OpImmediateDetailsFromSpec provides a slice of OpImmediateDetails
+// for a given OpSpec
+func OpImmediateDetailsFromSpec(spec OpSpec) []OpImmediateDetails {
+ argNotes := opcodeImmediateNotes[spec.Name]
+ if len(argNotes) == 0 {
+ return nil
+ }
+
+ details := make([]OpImmediateDetails, len(spec.Immediates))
+ for idx, imm := range spec.Immediates {
+ details[idx] = OpImmediateDetails{
+ Name: strings.ToTitle(imm.Name),
+ Comment: argNotes[idx],
+ Encoding: imm.kind.String(),
+ }
+
+ if imm.Group != nil {
+ details[idx].Reference = imm.Group.Name
+ }
+ }
+
+ return details
}
// further documentation on the function of the opcode
@@ -337,8 +368,8 @@ var opDocExtras = map[string]string{
"pushints": "pushints args are not added to the intcblock during assembly processes",
"getbit": "see explanation of bit ordering in setbit",
"setbit": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
- "balance": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
- "min_balance": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
+ "balance": "params: Txn.Accounts offset (or, since v4, an _available_ account address). Return: value.",
+ "min_balance": "params: Txn.Accounts offset (or, since v4, an _available_ account address). Return: value.",
"app_opted_in": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: 1 if opted in and 0 otherwise.",
"app_local_get": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
"app_local_get_ex": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go
index 8afca4520..cf1c6f029 100644
--- a/data/transactions/logic/doc_test.go
+++ b/data/transactions/logic/doc_test.go
@@ -55,11 +55,11 @@ func TestDocStragglers(t *testing.T) {
for op := range opDocExtras {
_, ok := opDocByName[op]
- require.True(t, ok, "%s is in opDocExtra, but not opDocByName", op)
+ assert.True(t, ok, "%s is in opDocExtra, but not opDocByName", op)
}
for op := range opcodeImmediateNotes {
_, ok := opDocByName[op]
- require.True(t, ok, "%s is in opcodeImmediateNotes, but not opDocByName", op)
+ assert.True(t, ok, "%s is in opcodeImmediateNotes, but not opDocByName", op)
}
}
@@ -98,43 +98,23 @@ func TestOpDoc(t *testing.T) {
require.Empty(t, xd)
}
-func TestOpImmediateNote(t *testing.T) {
+func TestOpImmediateDetails(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- xd := OpImmediateNote("txn")
- require.NotEmpty(t, xd)
- xd = OpImmediateNote("+")
- require.Empty(t, xd)
-}
+ for _, os := range OpSpecs {
+ deets := OpImmediateDetailsFromSpec(os)
+ require.Equal(t, len(os.Immediates), len(deets))
-func TestAllImmediatesDocumented(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
+ for idx, d := range deets {
+ imm := os.Immediates[idx]
+ require.NotEmpty(t, d.Comment)
+ require.Equal(t, strings.ToLower(d.Name), imm.Name)
+ require.Equal(t, d.Encoding, imm.kind.String())
- for _, op := range OpSpecs {
- count := len(op.Immediates)
- note := OpImmediateNote(op.Name)
- if count == 1 && op.Immediates[0].kind >= immBytes {
- // More elaborate than can be checked by easy count.
- assert.NotEmpty(t, note)
- continue
- }
- assert.Equal(t, count, strings.Count(note, "{"), "opcodeImmediateNotes for %s is wrong", op.Name)
- assert.Equal(t, count, strings.Count(note, "}"), "opcodeImmediateNotes for %s is wrong", op.Name)
- for _, imm := range op.Immediates {
- switch imm.kind {
- case immByte:
- require.True(t, strings.HasPrefix(note, "{uint8 "), "%v %v", op.Name, note)
- case immInt8:
- require.True(t, strings.HasPrefix(note, "{int8 "), "%v %v", op.Name, note)
- case immLabel:
- require.True(t, strings.HasPrefix(note, "{int16 "), "%v %v", op.Name, note)
- case immInt:
- require.True(t, strings.HasPrefix(note, "{varuint "), "%v %v", op.Name, note)
+ if imm.Group != nil {
+ require.Equal(t, d.Reference, imm.Group.Name)
}
- close := strings.Index(note, "}")
- note = strings.TrimPrefix(note[close+1:], " ")
}
}
}
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index 3de0f101d..e5e55525e 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -69,7 +69,7 @@ const maxLogCalls = 32
// To be clear, 0 would prevent inner appls, 1 would mean inner app calls cannot
// make inner appls. So the total app depth can be 1 higher than this number, if
// you count the top-level app call.
-const maxAppCallDepth = 8
+var maxAppCallDepth = 8
// maxStackDepth should not change unless controlled by an AVM version change
const maxStackDepth = 1000
@@ -82,11 +82,18 @@ type stackValue struct {
Bytes []byte
}
-func (sv stackValue) argType() StackType {
+func (sv stackValue) avmType() avmType {
if sv.Bytes != nil {
- return StackBytes
+ return avmBytes
+ }
+ return avmUint64
+}
+
+func (sv stackValue) stackType() StackType {
+ if sv.Bytes != nil {
+ return NewStackType(sv.avmType(), static(uint64(len(sv.Bytes))))
}
- return StackUint64
+ return NewStackType(sv.avmType(), static(sv.Uint))
}
func (sv stackValue) typeName() string {
@@ -153,8 +160,8 @@ func (sv stackValue) string(limit int) (string, error) {
return string(sv.Bytes), nil
}
-func (sv stackValue) toTealValue() (tv basics.TealValue) {
- if sv.argType() == StackBytes {
+func (sv stackValue) toTealValue() basics.TealValue {
+ if sv.avmType() == avmBytes {
return basics.TealValue{Type: basics.TealBytesType, Bytes: string(sv.Bytes)}
}
return basics.TealValue{Type: basics.TealUintType, Uint: sv.Uint}
@@ -242,31 +249,29 @@ type LedgerForLogic interface {
Counter() uint64
}
-// resources contains a catalog of available resources. It's used to track the
-// apps, assets, and boxes that are available to a transaction, outside the
-// direct foreign array mechanism.
-type resources struct {
- asas []basics.AssetIndex
- apps []basics.AppIndex
-
- // boxes are all of the top-level box refs from the txgroup. Most are added
- // during NewEvalParams(). refs using 0 on an appl create are resolved and
- // added when the appl executes. The boolean value indicates the "dirtiness"
- // of the box - has it been modified in this txngroup? If yes, the size of
- // the box counts against the group writeBudget. So delete is NOT a dirtying
- // operation.
- boxes map[boxRef]bool
-
- // dirtyBytes maintains a running count of the number of dirty bytes in `boxes`
- dirtyBytes uint64
-}
-
// boxRef is the "hydrated" form of a BoxRef - it has the actual app id, not an index
type boxRef struct {
app basics.AppIndex
name string
}
+// EvalConstants contains constant parameters used by opcodes during evaluation (in both real execution and simulation).
+type EvalConstants struct {
+ // MaxLogSize is the limit of total log size from n log calls in a program
+ MaxLogSize uint64
+
+ // MaxLogCalls is the limit of total log calls during a program execution
+ MaxLogCalls uint64
+}
+
+// RuntimeEvalConstants gives a set of const params used in normal runtime of opcodes
+func RuntimeEvalConstants() EvalConstants {
+ return EvalConstants{
+ MaxLogSize: uint64(maxLogSize),
+ MaxLogCalls: uint64(maxLogCalls),
+ }
+}
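Factoring these limits into EvalConstants lets a caller such as a simulator swap them without touching consensus code. A hedged sketch of such an override, written from inside the package (the x10 values are invented for illustration):

ep := NewEvalParams(txgroup, proto, specials) // starts with RuntimeEvalConstants()
ep.EvalConstants = EvalConstants{
	MaxLogSize:  uint64(maxLogSize) * 10,  // hypothetical relaxed limits
	MaxLogCalls: uint64(maxLogCalls) * 10, // for a simulation session
}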
+
// EvalParams contains data that comes into condition evaluation.
type EvalParams struct {
Proto *config.ConsensusParams
@@ -318,6 +323,8 @@ type EvalParams struct {
// readBudgetChecked allows us to only check the read budget once
readBudgetChecked bool
+ EvalConstants
+
// Caching these here means the hashes can be shared across the TxnGroup
// (and inners, because the cache is shared with the inner EvalParams)
appAddrCache map[basics.AppIndex]basics.Address
@@ -349,30 +356,9 @@ func copyWithClearAD(txgroup []transactions.SignedTxnWithAD) []transactions.Sign
// NewEvalParams creates an EvalParams to use while evaluating a top-level txgroup
func NewEvalParams(txgroup []transactions.SignedTxnWithAD, proto *config.ConsensusParams, specials *transactions.SpecialAddresses) *EvalParams {
apps := 0
- var allBoxes map[boxRef]bool
for _, tx := range txgroup {
if tx.Txn.Type == protocol.ApplicationCallTx {
apps++
- if allBoxes == nil && len(tx.Txn.Boxes) > 0 {
- allBoxes = make(map[boxRef]bool)
- }
- for _, br := range tx.Txn.Boxes {
- var app basics.AppIndex
- if br.Index == 0 {
- // "current app": Ignore if this is a create, else use ApplicationID
- if tx.Txn.ApplicationID == 0 {
- // When the create actually happens, and we learn the appID, we'll add it.
- continue
- }
- app = tx.Txn.ApplicationID
- } else {
- // Bounds check will already have been done by
- // WellFormed. For testing purposes, it's better to panic
- // now than after returning a nil.
- app = tx.Txn.ForeignApps[br.Index-1] // shift for the 0=this convention
- }
- allBoxes[boxRef{app, string(br.Name)}] = false
- }
}
}
@@ -392,17 +378,17 @@ func NewEvalParams(txgroup []transactions.SignedTxnWithAD, proto *config.Consens
credit := feeCredit(txgroup, proto.MinTxnFee)
- if proto.EnableAppCostPooling && apps > 0 {
+ if proto.EnableAppCostPooling {
pooledApplicationBudget = new(int)
*pooledApplicationBudget = apps * proto.MaxAppProgramCost
}
- if proto.EnableInnerTransactionPooling && apps > 0 {
+ if proto.EnableInnerTransactionPooling {
pooledAllowedInners = new(int)
*pooledAllowedInners = proto.MaxTxGroupSize * proto.MaxInnerTransactions
}
- return &EvalParams{
+ ep := &EvalParams{
TxnGroup: copyWithClearAD(txgroup),
Proto: proto,
Specials: specials,
@@ -411,9 +397,30 @@ func NewEvalParams(txgroup []transactions.SignedTxnWithAD, proto *config.Consens
FeeCredit: &credit,
PooledApplicationBudget: pooledApplicationBudget,
pooledAllowedInners: pooledAllowedInners,
- available: &resources{boxes: allBoxes},
appAddrCache: make(map[basics.AppIndex]basics.Address),
+ EvalConstants: RuntimeEvalConstants(),
+ }
+ // resources are computed after ep is constructed because app addresses are
+ // calculated there, and we'd like to use the caching mechanism built into
+ // the EvalParams. Perhaps we can make the computation even lazier, so it is
+ // only computed if needed.
+ ep.available = ep.computeAvailability()
+ return ep
+}
+
+func (ep *EvalParams) computeAvailability() *resources {
+ available := &resources{
+ sharedAccounts: make(map[basics.Address]struct{}),
+ sharedAsas: make(map[basics.AssetIndex]struct{}),
+ sharedApps: make(map[basics.AppIndex]struct{}),
+ sharedHoldings: make(map[ledgercore.AccountAsset]struct{}),
+ sharedLocals: make(map[ledgercore.AccountApp]struct{}),
+ boxes: make(map[boxRef]bool),
+ }
+ for i := range ep.TxnGroup {
+ available.fill(&ep.TxnGroup[i].Txn, ep)
}
+ return available
}
// feeCredit returns the extra fee supplied in this top-level txgroup compared
@@ -471,6 +478,7 @@ func NewInnerEvalParams(txg []transactions.SignedTxnWithAD, caller *EvalContext)
ioBudget: caller.ioBudget,
readBudgetChecked: true, // don't check for inners
appAddrCache: caller.appAddrCache,
+ EvalConstants: caller.EvalConstants,
// read comment in EvalParams declaration about txid caches
caller: caller,
}
@@ -531,10 +539,16 @@ func (ep *EvalParams) RecordAD(gi int, ad transactions.ApplyData) {
}
ep.TxnGroup[gi].ApplyData = ad
if aid := ad.ConfigAsset; aid != 0 {
- ep.available.asas = append(ep.available.asas, aid)
+ if ep.available.createdAsas == nil {
+ ep.available.createdAsas = make(map[basics.AssetIndex]struct{})
+ }
+ ep.available.createdAsas[aid] = struct{}{}
}
if aid := ad.ApplicationID; aid != 0 {
- ep.available.apps = append(ep.available.apps, aid)
+ if ep.available.createdApps == nil {
+ ep.available.createdApps = make(map[basics.AppIndex]struct{})
+ }
+ ep.available.createdApps[aid] = struct{}{}
}
}
@@ -612,98 +626,316 @@ func (cx *EvalContext) RunMode() RunMode {
return cx.runModeFlags
}
-// StackType describes the type of a value on the operand stack
-type StackType byte
+// avmType describes the type of a value on the operand stack
+// avmTypes are a subset of StackTypes
+type avmType byte
const (
- // StackNone in an OpSpec shows that the op pops or yields nothing
- StackNone StackType = iota
+ // avmNone in an OpSpec shows that the op pops or yields nothing
+ avmNone avmType = iota
- // StackAny in an OpSpec shows that the op pops or yield any type
- StackAny
+	// avmAny in an OpSpec shows that the op pops or yields any type
+ avmAny
- // StackUint64 in an OpSpec shows that the op pops or yields a uint64
- StackUint64
+ // avmUint64 in an OpSpec shows that the op pops or yields a uint64
+ avmUint64
- // StackBytes in an OpSpec shows that the op pops or yields a []byte
- StackBytes
+ // avmBytes in an OpSpec shows that the op pops or yields a []byte
+ avmBytes
)
-// StackTypes is an alias for a list of StackType with syntactic sugar
-type StackTypes []StackType
+func (at avmType) String() string {
+ switch at {
+ case avmNone:
+ return "none"
+ case avmAny:
+ return "any"
+ case avmUint64:
+ return "uint64"
+ case avmBytes:
+ return "[]byte"
+ }
+ return "internal error, unknown type"
+}
-func parseStackTypes(spec string) StackTypes {
- if spec == "" {
- return nil
+// stackType lifts the avmType to a StackType
+// it can do this because the base StackTypes
+// are a superset of avmType
+func (at avmType) stackType() StackType {
+ switch at {
+ case avmNone:
+ return StackNone
+ case avmAny:
+ return StackAny
+ case avmUint64:
+ return StackUint64
+ case avmBytes:
+ return StackBytes
}
- types := make(StackTypes, len(spec))
- for i, letter := range spec {
- switch letter {
- case 'a':
- types[i] = StackAny
- case 'b':
- types[i] = StackBytes
- case 'i':
- types[i] = StackUint64
- case 'x':
- types[i] = StackNone
- default:
- panic(spec)
+ return StackNone
+}
+
+var (
+ // StackUint64 is any valid uint64
+ StackUint64 = NewStackType(avmUint64, bound(0, math.MaxUint64))
+ // StackBytes is any valid bytestring
+ StackBytes = NewStackType(avmBytes, bound(0, maxStringSize))
+ // StackAny could be Bytes or Uint64
+ StackAny = StackType{
+ Name: avmAny.String(),
+ AVMType: avmAny,
+ Bound: [2]uint64{0, 0},
+ }
+ // StackNone is used when there is no input or output to
+ // an opcode
+ StackNone = StackType{
+ Name: avmNone.String(),
+ AVMType: avmNone,
+ }
+
+ // StackBoolean constrains the int to 1 or 0, representing True or False
+ StackBoolean = NewStackType(avmUint64, bound(0, 1), "bool")
+ // StackAddress represents an address
+ StackAddress = NewStackType(avmBytes, static(32), "address")
+ // StackBytes32 represents a bytestring that should have exactly 32 bytes
+ StackBytes32 = NewStackType(avmBytes, static(32), "[32]byte")
+ // StackBigInt represents a bytestring that should be treated like an int
+ StackBigInt = NewStackType(avmBytes, bound(0, maxByteMathSize), "bigint")
+ // StackMethodSelector represents a bytestring that should be treated like a method selector
+ StackMethodSelector = NewStackType(avmBytes, static(4), "method")
+ // StackStateKey represents a bytestring that can be used as a key to some storage (global/local/box)
+ StackStateKey = NewStackType(avmBytes, bound(0, 64), "stateKey")
+ // StackBoxName represents a bytestring that can be used as a key to a box
+ StackBoxName = NewStackType(avmBytes, bound(1, 64), "boxName")
+
+ // StackZeroUint64 is a StackUint64 with a minimum value of 0 and a maximum value of 0
+ StackZeroUint64 = NewStackType(avmUint64, bound(0, 0), "0")
+ // StackZeroBytes is a StackBytes with a minimum length of 0 and a maximum length of 0
+	StackZeroBytes = NewStackType(avmBytes, bound(0, 0), "''")
+
+ // AllStackTypes is a map of all the stack types we recognize
+ // so that we can iterate over them in doc prep
+ // and use them for opcode proto shorthand
+ AllStackTypes = map[rune]StackType{
+ 'a': StackAny,
+ 'b': StackBytes,
+ 'i': StackUint64,
+ 'x': StackNone,
+ 'A': StackAddress,
+ 'I': StackBigInt,
+ 'T': StackBoolean,
+ 'H': StackBytes32,
+ 'M': StackMethodSelector,
+ 'K': StackStateKey,
+ 'N': StackBoxName,
+ }
+)
+
+func bound(min, max uint64) [2]uint64 {
+ return [2]uint64{min, max}
+}
+
+func static(size uint64) [2]uint64 {
+ return bound(size, size)
+}
+
+func union(a, b [2]uint64) [2]uint64 {
+ u := [2]uint64{a[0], a[1]}
+ if b[0] < u[0] {
+ u[0] = b[0]
+ }
+
+ if b[1] > u[1] {
+ u[1] = b[1]
+ }
+ return u
+}
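+
+// For example, these helpers compose as follows:
+//
+//	static(32)                        // [2]uint64{32, 32}
+//	union(bound(1, 10), bound(5, 64)) // [2]uint64{1, 64}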
+
+// StackType describes the type of a value on the operand stack
+type StackType struct {
+	Name    string // alias (address, boolean, ...) or a derived name such as [5]byte
+ AVMType avmType
+ Bound [2]uint64 // represents max/min value for uint64 or max/min length for byte[]
+}
+
+// NewStackType initializes a new StackType with the fields passed
+func NewStackType(at avmType, bounds [2]uint64, stname ...string) StackType {
+ name := at.String()
+
+	// If the bounds are static, set the name to
+	// show the static value
+ if bounds[0] == bounds[1] {
+ switch at {
+ case avmBytes:
+ name = fmt.Sprintf("[%d]byte", bounds[0])
+ case avmUint64:
+ name = fmt.Sprintf("%d", bounds[0])
}
}
- return types
+
+ if len(stname) > 0 {
+ name = stname[0]
+ }
+
+ return StackType{Name: name, AVMType: at, Bound: bounds}
}
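+
+// For example, a static bound derives a name, and an explicit name wins:
+//
+//	NewStackType(avmBytes, static(32))           // Name: "[32]byte"
+//	NewStackType(avmUint64, bound(0, 1), "bool") // Name: "bool"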
-func (st StackType) String() string {
- switch st {
- case StackNone:
- return "None"
- case StackAny:
- return "any"
- case StackUint64:
- return "uint64"
- case StackBytes:
- return "[]byte"
+func (st StackType) union(b StackType) StackType {
+ // TODO: Can we ever receive one or the other
+ // as None? should that be a panic?
+ if st.AVMType != b.AVMType {
+ return StackAny
}
- return "internal error, unknown type"
+
+ // Same type now, so we can just take the union of the bounds
+ return NewStackType(st.AVMType, union(st.Bound, b.Bound))
+}
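+
+// For example, StackBoxName.union(StackStateKey) is a []byte type bounded
+// 0..64, while a union across avm types (say, bool with address) collapses
+// to StackAny.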
+
+func (st StackType) narrowed(bounds [2]uint64) StackType {
+ return NewStackType(st.AVMType, bounds)
+}
+
+func (st StackType) widened() StackType {
+ // Take only the avm type
+ switch st.AVMType {
+ case avmBytes:
+ return StackBytes
+ case avmUint64:
+ return StackUint64
+ case avmAny:
+ return StackAny
+ default:
+ panic(fmt.Sprintf("What are you tyring to widen?: %+v", st))
+ }
+}
+
+func (st StackType) constant() (uint64, bool) {
+ if st.Bound[0] == st.Bound[1] {
+ return st.Bound[0], true
+ }
+ return 0, false
+}
+
+// overlaps checks if there is enough overlap
+// between the given types that the receiver can
+// possibly fit in the expected type
+func (st StackType) overlaps(expected StackType) bool {
+ if st.AVMType == avmNone || expected.AVMType == avmNone {
+ return false
+ }
+
+ if st.AVMType == avmAny || expected.AVMType == avmAny {
+ return true
+ }
+
+ // By now, both are either uint or bytes
+ // and must match
+ if st.AVMType != expected.AVMType {
+ return false
+ }
+
+ // Same type now
+ // Check if our constraints will satisfy the other type
+ smin, smax := st.Bound[0], st.Bound[1]
+ emin, emax := expected.Bound[0], expected.Bound[1]
+
+ return smin <= emax && smax >= emin
+}
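+
+// For example:
+//
+//	StackStateKey.overlaps(StackBoxName)       // true: 0..64 vs 1..64
+//	StackAddress.overlaps(StackMethodSelector) // false: 32 vs 4 bytes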
+
+func (st StackType) String() string {
+ return st.Name
}
// Typed tells whether the StackType is a specific concrete type.
func (st StackType) Typed() bool {
- switch st {
- case StackUint64, StackBytes:
+ switch st.AVMType {
+ case avmUint64, avmBytes:
return true
}
return false
}
-// PanicError wraps a recover() catching a panic()
-type PanicError struct {
+// StackTypes is an alias for a list of StackType with syntactic sugar
+type StackTypes []StackType
+
+// Reversed returns the StackTypes in reverse order,
+// useful for displaying the stack as an op sees it
+func (st StackTypes) Reversed() StackTypes {
+ nst := make(StackTypes, len(st))
+ for idx := 0; idx < len(st); idx++ {
+ nst[idx] = st[len(st)-1-idx]
+ }
+ return nst
+}
+
+func (st StackTypes) String() string {
+	// Note: String does not itself reverse; use Reversed() to show top-of-stack first
+ return fmt.Sprintf("(%s)", strings.Join(st.strings(), ", "))
+}
+
+func (st StackTypes) strings() []string {
+ var strs = make([]string, len(st))
+ for idx, s := range st {
+ strs[idx] = s.String()
+ }
+ return strs
+}
+
+func parseStackTypes(spec string) StackTypes {
+ if spec == "" {
+ return nil
+ }
+ types := make(StackTypes, len(spec))
+ for i, letter := range spec {
+ st, ok := AllStackTypes[letter]
+ if !ok {
+ panic(spec)
+ }
+ types[i] = st
+ }
+ return types
+}
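+
+// For example, the proto shorthand "bi" parses to (StackBytes, StackUint64);
+// an unrecognized letter panics, since specs are authored alongside the
+// opcodes.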
+
+// panicError wraps a recover() catching a panic()
+type panicError struct {
PanicValue interface{}
StackTrace string
}
-func (pe PanicError) Error() string {
+func (pe panicError) Error() string {
return fmt.Sprintf("panic in TEAL Eval: %v\n%s", pe.PanicValue, pe.StackTrace)
}
var errLogicSigNotSupported = errors.New("LogicSig not supported")
var errTooManyArgs = errors.New("LogicSig has too many arguments")
-// ClearStateBudgetError allows evaluation to signal that the caller should
-// reject the transaction. Normally, an error in evaluation would not cause a
-// ClearState txn to fail. However, callers fail a txn for ClearStateBudgetError
-// because the transaction has not provided enough budget to let ClearState do
-// its job.
-type ClearStateBudgetError struct {
- offered int
+// EvalError indicates AVM evaluation failure
+type EvalError struct {
+ Err error
+ details string
+ groupIndex int
+ logicsig bool
}
-func (e ClearStateBudgetError) Error() string {
- return fmt.Sprintf("Attempted ClearState execution with low OpcodeBudget %d", e.offered)
+// Error satisfies builtin interface `error`
+func (err EvalError) Error() string {
+ var msg string
+ if err.logicsig {
+ msg = fmt.Sprintf("rejected by logic err=%v", err.Err)
+ } else {
+ msg = fmt.Sprintf("logic eval error: %v", err.Err)
+ }
+ if err.details == "" {
+ return msg
+ }
+ return msg + ". Details: " + err.details
}
-// EvalContract executes stateful TEAL program as the gi'th transaction in params
+func (err EvalError) Unwrap() error {
+ return err.Err
+}
+
+// EvalContract executes stateful program as the gi'th transaction in params
func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParams) (bool, *EvalContext, error) {
if params.Ledger == nil {
return false, nil, errors.New("no ledger in contract eval")
@@ -724,18 +956,25 @@ func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParam
if cx.Proto.IsolateClearState && cx.txn.Txn.OnCompletion == transactions.ClearStateOC {
if cx.PooledApplicationBudget != nil && *cx.PooledApplicationBudget < cx.Proto.MaxAppProgramCost {
- return false, nil, ClearStateBudgetError{*cx.PooledApplicationBudget}
+ return false, nil, fmt.Errorf("Attempted ClearState execution with low OpcodeBudget %d", *cx.PooledApplicationBudget)
}
}
- // If this is a creation, make any "0 index" box refs available now that we
- // have an appID.
+ // If this is a creation...
if cx.txn.Txn.ApplicationID == 0 {
+ // make any "0 index" box refs available now that we have an appID.
for _, br := range cx.txn.Txn.Boxes {
if br.Index == 0 {
cx.EvalParams.available.boxes[boxRef{cx.appID, string(br.Name)}] = false
}
}
+ // and add the appID to `createdApps`
+ if cx.EvalParams.Proto.LogicSigVersion >= sharedResourcesVersion {
+ if cx.EvalParams.available.createdApps == nil {
+ cx.EvalParams.available.createdApps = make(map[basics.AppIndex]struct{})
+ }
+ cx.EvalParams.available.createdApps[cx.appID] = struct{}{}
+ }
}
// Check the I/O budget for reading if this is the first top-level app call
@@ -765,7 +1004,11 @@ func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParam
used = basics.AddSaturate(used, size)
if used > cx.ioBudget {
- return false, nil, fmt.Errorf("box read budget (%d) exceeded", cx.ioBudget)
+ err = fmt.Errorf("box read budget (%d) exceeded", cx.ioBudget)
+ if !cx.Proto.EnableBareBudgetError {
+ err = EvalError{err, "", gi, false}
+ }
+ return false, nil, err
}
}
cx.readBudgetChecked = true
@@ -775,6 +1018,11 @@ func EvalContract(program []byte, gi int, aid basics.AppIndex, params *EvalParam
fmt.Fprintf(cx.Trace, "--- enter %d %s %v\n", aid, cx.txn.Txn.OnCompletion, cx.txn.Txn.ApplicationArgs)
}
pass, err := eval(program, &cx)
+ if err != nil {
+ pc, det := cx.pcDetails()
+ details := fmt.Sprintf("pc=%d, opcodes=%s", pc, det)
+ err = EvalError{err, details, gi, false}
+ }
if cx.Trace != nil && cx.caller != nil {
fmt.Fprintf(cx.Trace, "--- exit %d accept=%t\n", aid, pass)
@@ -797,7 +1045,7 @@ func EvalApp(program []byte, gi int, aid basics.AppIndex, params *EvalParams) (b
// EvalSignatureFull evaluates the logicsig of the ith transaction in params.
// A program passes successfully if it finishes with one int element on the stack that is non-zero.
// It returns EvalContext suitable for obtaining additional info about the execution.
-func EvalSignatureFull(gi int, params *EvalParams) (pass bool, pcx *EvalContext, err error) {
+func EvalSignatureFull(gi int, params *EvalParams) (bool, *EvalContext, error) {
if params.SigLedger == nil {
return false, nil, errors.New("no sig ledger in signature eval")
}
@@ -807,14 +1055,21 @@ func EvalSignatureFull(gi int, params *EvalParams) (pass bool, pcx *EvalContext,
groupIndex: gi,
txn: &params.TxnGroup[gi],
}
- pass, err = eval(cx.txn.Lsig.Logic, &cx)
+ pass, err := eval(cx.txn.Lsig.Logic, &cx)
+
+ if err != nil {
+ pc, det := cx.pcDetails()
+ details := fmt.Sprintf("pc=%d, opcodes=%s", pc, det)
+ err = EvalError{err, details, gi, true}
+ }
+
return pass, &cx, err
}
// EvalSignature evaluates the logicsig of the ith transaction in params.
// A program passes successfully if it finishes with one int element on the stack that is non-zero.
-func EvalSignature(gi int, params *EvalParams) (pass bool, err error) {
- pass, _, err = EvalSignatureFull(gi, params)
+func EvalSignature(gi int, params *EvalParams) (bool, error) {
+ pass, _, err := EvalSignatureFull(gi, params)
return pass, err
}
@@ -830,7 +1085,7 @@ func eval(program []byte, cx *EvalContext) (pass bool, err error) {
if cx.Trace != nil {
errstr += cx.Trace.String()
}
- err = PanicError{x, errstr}
+ err = panicError{x, errstr}
cx.EvalParams.log().Errorf("recovered panic in Eval: %v", err)
}
}()
@@ -940,7 +1195,7 @@ func check(program []byte, params *EvalParams, mode RunMode) (err error) {
if params.Trace != nil {
errstr += params.Trace.String()
}
- err = PanicError{x, errstr}
+ err = panicError{x, errstr}
params.log().Errorf("recovered panic in Check: %s", err)
}
}()
@@ -1007,8 +1262,8 @@ func versionCheck(program []byte, params *EvalParams) (uint64, int, error) {
return version, vlen, nil
}
-func opCompat(expected, got StackType) bool {
- if expected == StackAny {
+func opCompat(expected, got avmType) bool {
+ if expected == avmAny {
return true
}
return expected == got
@@ -1091,7 +1346,7 @@ func (cx *EvalContext) step() error {
}
first := len(cx.stack) - len(spec.Arg.Types)
for i, argType := range spec.Arg.Types {
- if !opCompat(argType, cx.stack[first+i].argType()) {
+ if !opCompat(argType.AVMType, cx.stack[first+i].avmType()) {
return fmt.Errorf("%s arg %d wanted %s but got %s", spec.Name, i, argType, cx.stack[first+i].typeName())
}
}
@@ -1139,14 +1394,14 @@ func (cx *EvalContext) step() error {
}
first = postheight - len(spec.Return.Types)
for i, argType := range spec.Return.Types {
- stackType := cx.stack[first+i].argType()
- if !opCompat(argType, stackType) {
+ stackType := cx.stack[first+i].avmType()
+ if !opCompat(argType.AVMType, stackType) {
if spec.AlwaysExits() { // We test in the loop because it's the uncommon case.
break
}
return fmt.Errorf("%s produced %s but intended %s", spec.Name, cx.stack[first+i].typeName(), argType)
}
- if stackType == StackBytes && len(cx.stack[first+i].Bytes) > maxStringSize {
+ if stackType == avmBytes && len(cx.stack[first+i].Bytes) > maxStringSize {
return fmt.Errorf("%s produced a too big (%d) byte-array", spec.Name, len(cx.stack[first+i].Bytes))
}
}
@@ -1536,13 +1791,13 @@ func opOr(cx *EvalContext) error {
func opEq(cx *EvalContext) error {
last := len(cx.stack) - 1
prev := last - 1
- ta := cx.stack[prev].argType()
- tb := cx.stack[last].argType()
+ ta := cx.stack[prev].avmType()
+ tb := cx.stack[last].avmType()
if ta != tb {
return fmt.Errorf("cannot compare (%s to %s)", cx.stack[prev].typeName(), cx.stack[last].typeName())
}
var cond bool
- if ta == StackBytes {
+ if ta == avmBytes {
cond = bytes.Equal(cx.stack[prev].Bytes, cx.stack[last].Bytes)
} else {
cond = cx.stack[prev].Uint == cx.stack[last].Uint
@@ -1578,7 +1833,7 @@ func opItob(cx *EvalContext) error {
ibytes := make([]byte, 8)
binary.BigEndian.PutUint64(ibytes, cx.stack[last].Uint)
// cx.stack[last].Uint is not cleared out as optimization
- // stackValue.argType() checks Bytes field first
+ // stackValue.avmType() checks Bytes field first
cx.stack[last].Bytes = ibytes
return nil
}
@@ -1681,7 +1936,7 @@ func opSqrt(cx *EvalContext) error {
func opBitLen(cx *EvalContext) error {
last := len(cx.stack) - 1
- if cx.stack[last].argType() == StackUint64 {
+ if cx.stack[last].avmType() == avmUint64 {
cx.stack[last].Uint = uint64(bits.Len64(cx.stack[last].Uint))
return nil
}
@@ -2335,14 +2590,14 @@ func opMatch(cx *EvalContext) error {
matchedIdx := n
for i, stackArg := range matchList {
- if stackArg.argType() != matchVal.argType() {
+ if stackArg.avmType() != matchVal.avmType() {
continue
}
- if matchVal.argType() == StackBytes && bytes.Equal(matchVal.Bytes, stackArg.Bytes) {
+ if matchVal.avmType() == avmBytes && bytes.Equal(matchVal.Bytes, stackArg.Bytes) {
matchedIdx = i
break
- } else if matchVal.argType() == StackUint64 && matchVal.Uint == stackArg.Uint {
+ } else if matchVal.avmType() == avmUint64 && matchVal.Uint == stackArg.Uint {
matchedIdx = i
break
}
@@ -2480,8 +2735,8 @@ func (cx *EvalContext) assetHoldingToValue(holding *basics.AssetHolding, fs asse
return sv, fmt.Errorf("invalid asset_holding_get field %d", fs.field)
}
- if fs.ftype != sv.argType() {
- return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
+ if fs.ftype.AVMType != sv.avmType() {
+ return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.avmType())
}
return sv, nil
}
@@ -2516,8 +2771,8 @@ func (cx *EvalContext) assetParamsToValue(params *basics.AssetParams, creator ba
return sv, fmt.Errorf("invalid asset_params_get field %d", fs.field)
}
- if fs.ftype != sv.argType() {
- return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
+ if fs.ftype.AVMType != sv.avmType() {
+ return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.avmType())
}
return sv, nil
}
@@ -2543,8 +2798,8 @@ func (cx *EvalContext) appParamsToValue(params *basics.AppParams, fs appParamsFi
return sv, fmt.Errorf("invalid app_params_get field %d", fs.field)
}
- if fs.ftype != sv.argType() {
- return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
+ if fs.ftype.AVMType != sv.avmType() {
+ return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.avmType())
}
return sv, nil
}
@@ -2877,8 +3132,8 @@ func (cx *EvalContext) txnFieldToStack(stxn *transactions.SignedTxnWithAD, fs *t
return sv, fmt.Errorf("invalid txn field %s", fs.field)
}
- if fs.ftype != sv.argType() {
- return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
+ if fs.ftype.AVMType != sv.avmType() {
+ return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.avmType())
}
return sv, nil
}
@@ -3256,14 +3511,14 @@ func (cx *EvalContext) getLatestTimestamp() (uint64, error) {
}
// getApplicationAddress memoizes app.Address() across a tx group's evaluation
-func (cx *EvalContext) getApplicationAddress(app basics.AppIndex) basics.Address {
+func (ep *EvalParams) getApplicationAddress(app basics.AppIndex) basics.Address {
/* Do not instantiate the cache here, that would mask a programming error.
The cache must be instantiated at EvalParams construction time, so that
proper sharing with inner EvalParams can work. */
- appAddr, ok := cx.appAddrCache[app]
+ appAddr, ok := ep.appAddrCache[app]
if !ok {
appAddr = app.Address()
- cx.appAddrCache[app] = appAddr
+ ep.appAddrCache[app] = appAddr
}
return appAddr
@@ -3325,8 +3580,8 @@ func (cx *EvalContext) globalFieldToValue(fs globalFieldSpec) (sv stackValue, er
err = fmt.Errorf("invalid global field %d", fs.field)
}
- if fs.ftype != sv.argType() {
- return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.argType())
+ if fs.ftype.AVMType != sv.avmType() {
+ return sv, fmt.Errorf("%s expected field type is %s but got %s", fs.field, fs.ftype, sv.avmType())
}
return sv, err
@@ -3754,7 +4009,7 @@ func opGetBit(cx *EvalContext) error {
target := cx.stack[prev]
var bit uint64
- if target.argType() == StackUint64 {
+ if target.avmType() == avmUint64 {
if idx > 63 {
return errors.New("getbit index > 63 with with Uint")
}
@@ -3796,7 +4051,7 @@ func opSetBit(cx *EvalContext) error {
return errors.New("setbit value > 1")
}
- if target.argType() == StackUint64 {
+ if target.avmType() == avmUint64 {
if idx > 63 {
return errors.New("setbit index > 63 with Uint")
}
@@ -4003,70 +4258,134 @@ func opExtract64Bits(cx *EvalContext) error {
return opExtractNBytes(cx, 8) // extract 8 bytes
}
+// assignAccount is used to convert a stackValue into a 32-byte account value,
+// enforcing any "availability" restrictions in force.
+func (cx *EvalContext) assignAccount(sv stackValue) (basics.Address, error) {
+ addr, err := sv.address()
+ if err != nil {
+ return basics.Address{}, err
+ }
+
+ if cx.availableAccount(addr) {
+ return addr, nil
+ }
+ return basics.Address{}, fmt.Errorf("invalid Account reference %s", addr)
+}
+
// accountReference yields the address and Accounts offset designated by a
// stackValue. If the stackValue is the app account, an account of an app in
-// created.apps, or an account of an app in foreignApps, and it is not in the
-// Accounts array, then len(Accounts) + 1 is returned as the index. This would
-// let us catch the mistake if the index is used for set/del. If the txn somehow
-// "psychically" predicted the address, and therefore it IS in txn.Accounts,
-// then happy day, we can set/del it. Return the proper index.
-
-// If we ever want apps to be able to change local state on these accounts
-// (which includes this app's own account!), we will need a change to
-// EvalDelta's on disk format, so that the addr can be encoded explicitly rather
-// than by index into txn.Accounts.
+// created.apps, an account of an app in foreignApps, or an account made
+// available by another txn, and it is not in the Accounts array, then
+// len(Accounts) + 1 is returned as the index. This would let us catch the
+// mistake if the index is used for set/del. If the txn somehow "psychically"
+// predicted the address, and therefore it IS in txn.Accounts, then happy day,
+// we can set/del it. Return the proper index.
+
+// Starting in v9, apps can change local state on these accounts by adding the
+// address to EvalDelta.SharedAccounts and indexing it there. But at this level,
+// we still report the "failure" to find an index with `len(Accounts)+1`. That
+// value allows mutableAccountReference to decide whether to report an error or
+// not, based on version.
func (cx *EvalContext) accountReference(account stackValue) (basics.Address, uint64, error) {
- if account.argType() == StackUint64 {
+ addr, idx, err := cx.resolveAccount(account)
+ if err != nil {
+ return addr, 0, err
+ }
+
+ if idx >= 0 {
+ return addr, uint64(idx), err
+ }
+ // negative idx tells us we can't return the idx into
+ // txn.Accounts, but the account might still be available (because it was
+ // created earlier in the group, or because of group sharing)
+ ok := cx.availableAccount(addr)
+ if !ok {
+ return addr, 0, fmt.Errorf("invalid Account reference %s", addr)
+ }
+ // available, but not in txn.Accounts. Return 1 higher to signal.
+ return addr, uint64(len(cx.txn.Txn.Accounts) + 1), nil
+}
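+
+// For example, the Sender's 32-byte address resolves to slot 0, the address at
+// txn.Accounts[0] to slot 1, and an address reachable only through group
+// sharing comes back as len(Accounts)+1.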
+
+// resolveAccount determines the Address and slot indicated by a stackValue: it
+// either confirms that the bytes are indeed 32 bytes (trying to find them in
+// txn.Accounts, else returning -1), or it performs the lookup of the integer
+// arg in txn.Accounts.
+func (cx *EvalContext) resolveAccount(account stackValue) (basics.Address, int, error) {
+ if account.avmType() == avmUint64 {
addr, err := cx.txn.Txn.AddressByIndex(account.Uint, cx.txn.Txn.Sender)
- return addr, account.Uint, err
+ return addr, int(account.Uint), err
}
addr, err := account.address()
if err != nil {
- return addr, 0, err
+ return addr, -1, err
}
+
idx, err := cx.txn.Txn.IndexByAddress(addr, cx.txn.Txn.Sender)
+ if err != nil {
+ // we don't want to convey `err`, because the supplied `account` does
+ // seem to be an address, but we can't give a valid index.
+ return addr, -1, nil //nolint:nilerr // see above comment
+ }
+ return addr, int(idx), nil
+}
+
+func (cx *EvalContext) availableAccount(addr basics.Address) bool {
+ _, err := cx.txn.Txn.IndexByAddress(addr, cx.txn.Txn.Sender)
+ if err == nil {
+ return true
+ }
- invalidIndex := uint64(len(cx.txn.Txn.Accounts) + 1)
// Allow an address for an app that was created in group
- if err != nil && cx.version >= createdResourcesVersion {
- for _, appID := range cx.available.apps {
+ if cx.version >= createdResourcesVersion {
+ for appID := range cx.available.createdApps {
createdAddress := cx.getApplicationAddress(appID)
if addr == createdAddress {
- return addr, invalidIndex, nil
+ return true
}
}
}
+ // or some other txn mentioned it
+ if cx.version >= sharedResourcesVersion {
+ if _, ok := cx.available.sharedAccounts[addr]; ok {
+ return true
+ }
+ }
+
// Allow an address for an app that was provided in the foreign apps array.
- if err != nil && cx.version >= appAddressAvailableVersion {
+ if cx.version >= appAddressAvailableVersion {
for _, appID := range cx.txn.Txn.ForeignApps {
foreignAddress := cx.getApplicationAddress(appID)
if addr == foreignAddress {
- return addr, invalidIndex, nil
+ return true
}
}
}
- // this app's address is also allowed
- if err != nil {
- appAddr := cx.getApplicationAddress(cx.appID)
- if appAddr == addr {
- return addr, invalidIndex, nil
- }
+ if cx.getApplicationAddress(cx.appID) == addr {
+ return true
}
- return addr, idx, err
+ return false
}
func (cx *EvalContext) mutableAccountReference(account stackValue) (basics.Address, uint64, error) {
addr, accountIdx, err := cx.accountReference(account)
- if err == nil && accountIdx > uint64(len(cx.txn.Txn.Accounts)) {
+ if err != nil {
+ return basics.Address{}, 0, err
+ }
+ if accountIdx > uint64(len(cx.txn.Txn.Accounts)) {
// There was no error, but accountReference has signaled that accountIdx
// is not for mutable ops (because it can't encode it in EvalDelta)
- // This also tells us that account.address() will work.
- addr, _ := account.address()
- err = fmt.Errorf("invalid Account reference for mutation %s", addr)
+ if cx.version < sharedResourcesVersion {
+ return basics.Address{}, 0, fmt.Errorf("invalid Account reference for mutation %s", addr)
+ }
+ // fall through, which means that starting in v9, the accountIdx
+ // returned can be > len(tx.Accounts). It will end up getting passed to
+		// GetLocal, which can record that index in order to produce old-style
+		// EvalDeltas. But those are only made in old consensus versions - at
+		// that point v9 did not exist, so no backward-incompatible change occurs.
}
return addr, accountIdx, err
}
@@ -4111,12 +4430,7 @@ func opAppOptedIn(cx *EvalContext) error {
last := len(cx.stack) - 1 // app
prev := last - 1 // account
- addr, _, err := cx.accountReference(cx.stack[prev])
- if err != nil {
- return err
- }
-
- app, err := appReference(cx, cx.stack[last].Uint, false)
+ addr, app, _, err := cx.localsReference(cx.stack[prev], cx.stack[last].Uint)
if err != nil {
return err
}
@@ -4167,12 +4481,7 @@ func opAppLocalGetEx(cx *EvalContext) error {
}
func opAppLocalGetImpl(cx *EvalContext, appID uint64, key []byte, acct stackValue) (result stackValue, ok bool, err error) {
- addr, accountIdx, err := cx.accountReference(acct)
- if err != nil {
- return
- }
-
- app, err := appReference(cx, appID, false)
+ addr, app, accountIdx, err := cx.localsReference(acct, appID)
if err != nil {
return
}
@@ -4189,7 +4498,7 @@ func opAppLocalGetImpl(cx *EvalContext, appID uint64, key []byte, acct stackValu
}
func opAppGetGlobalStateImpl(cx *EvalContext, appIndex uint64, key []byte) (result stackValue, ok bool, err error) {
- app, err := appReference(cx, appIndex, true)
+ app, err := cx.appReference(appIndex, true)
if err != nil {
return
}
@@ -4235,6 +4544,32 @@ func opAppGlobalGetEx(cx *EvalContext) error {
return nil
}
+// ensureLocalDelta is used to get an accountIdx that is usable in the
+// LocalDeltas of the EvalDelta. The input accountIdx is "tentative" - if it's
+// larger than len(txn.Accounts), then we may need to add the address into
+// SharedAccounts, and index into it.
+func (cx *EvalContext) ensureLocalDelta(accountIdx uint64, addr basics.Address) uint64 {
+ if accountIdx > uint64(len(cx.txn.Txn.Accounts)) {
+ // the returned accountIdx was just a signal that the account was
+ // not in txn, so we look in SharedAccounts, allocating space if needed.
+ found := false
+ for i, shared := range cx.txn.EvalDelta.SharedAccts {
+ if shared == addr {
+ found = true
+ accountIdx = uint64(len(cx.txn.Txn.Accounts) + 1 + i)
+ }
+ }
+ if !found {
+ cx.txn.EvalDelta.SharedAccts = append(cx.txn.EvalDelta.SharedAccts, addr)
+ accountIdx = uint64(len(cx.txn.Txn.Accounts) + len(cx.txn.EvalDelta.SharedAccts))
+ }
+ }
+ if _, ok := cx.txn.EvalDelta.LocalDeltas[accountIdx]; !ok {
+ cx.txn.EvalDelta.LocalDeltas[accountIdx] = basics.StateDelta{}
+ }
+ return accountIdx
+}
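+
+// For example, with 2 txn.Accounts and one address already in SharedAccts, a
+// new shared address is appended and indexed at len(Accounts)+2 == 4: slot 0
+// is the Sender, 1-2 the Accounts array, and 3 the first shared address.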
+
func opAppLocalPut(cx *EvalContext) error {
last := len(cx.stack) - 1 // value
prev := last - 1 // state key
@@ -4264,9 +4599,7 @@ func opAppLocalPut(cx *EvalContext) error {
tv := sv.toTealValue()
if !ok || tv != etv {
- if _, ok := cx.txn.EvalDelta.LocalDeltas[accountIdx]; !ok {
- cx.txn.EvalDelta.LocalDeltas[accountIdx] = basics.StateDelta{}
- }
+ accountIdx = cx.ensureLocalDelta(accountIdx, addr)
cx.txn.EvalDelta.LocalDeltas[accountIdx][key] = tv.ToValueDelta()
}
@@ -4351,9 +4684,7 @@ func opAppLocalDel(cx *EvalContext) error {
if err != nil {
return err
}
- if _, ok := cx.txn.EvalDelta.LocalDeltas[accountIdx]; !ok {
- cx.txn.EvalDelta.LocalDeltas[accountIdx] = basics.StateDelta{}
- }
+ accountIdx = cx.ensureLocalDelta(accountIdx, addr)
cx.txn.EvalDelta.LocalDeltas[accountIdx][key] = basics.ValueDelta{
Action: basics.DeleteAction,
}
@@ -4398,83 +4729,213 @@ func opAppGlobalDel(cx *EvalContext) error {
// more than 2 or so, and was often called an "index". But it was not a
// basics.AssetIndex or basics.ApplicationIndex.
-func appReference(cx *EvalContext, ref uint64, foreign bool) (basics.AppIndex, error) {
+func (cx *EvalContext) appReference(ref uint64, foreign bool) (aid basics.AppIndex, err error) {
if cx.version >= directRefEnabledVersion {
- if ref == 0 || ref == uint64(cx.appID) {
- return cx.appID, nil
- }
- for _, appID := range cx.txn.Txn.ForeignApps {
- if appID == basics.AppIndex(ref) {
- return appID, nil
- }
- }
- // or was created in group
- if cx.version >= createdResourcesVersion {
- for _, appID := range cx.available.apps {
- if appID == basics.AppIndex(ref) {
- return appID, nil
- }
+ return cx.resolveApp(ref)
+ }
+
+ // resolveApp is already similarly protected (and must be, since it is
+ // called independently)
+ if cx.Proto.AppForbidLowResources {
+ defer func() {
+ if aid <= lastForbiddenResource && err == nil {
+ err = fmt.Errorf("low App lookup %d", aid)
}
- }
- // Allow use of indexes, but this comes last so that clear advice can be
- // given to anyone who cares about semantics in the first few rounds of
- // a new network - don't use indexes for references, use the App ID
+ }()
+ }
+	// Old rules, pre directRefEnabledVersion, when a ref had to be a slot for
+	// some opcodes, and an ID for others.
+ if ref == 0 { // Even back when expected to be a real ID, ref = 0 was current app
+ return cx.appID, nil
+ }
+ if foreign {
+ // In old versions, a foreign reference must be an index in ForeignApps or 0
if ref <= uint64(len(cx.txn.Txn.ForeignApps)) {
return basics.AppIndex(cx.txn.Txn.ForeignApps[ref-1]), nil
}
- } else {
- // Old rules
- if ref == 0 { // Even back when expected to be a real ID, ref = 0 was current app
- return cx.appID, nil
- }
- if foreign {
- // In old versions, a foreign reference must be an index in ForeignAssets or 0
- if ref <= uint64(len(cx.txn.Txn.ForeignApps)) {
- return basics.AppIndex(cx.txn.Txn.ForeignApps[ref-1]), nil
- }
- } else {
- // Otherwise it's direct
- return basics.AppIndex(ref), nil
- }
+ return 0, fmt.Errorf("App index %d beyond txn.ForeignApps", ref)
}
- return basics.AppIndex(0), fmt.Errorf("invalid App reference %d", ref)
+ // Otherwise it's direct
+ return basics.AppIndex(ref), nil
}
-func asaReference(cx *EvalContext, ref uint64, foreign bool) (basics.AssetIndex, error) {
- if cx.version >= directRefEnabledVersion {
- for _, assetID := range cx.txn.Txn.ForeignAssets {
- if assetID == basics.AssetIndex(ref) {
- return assetID, nil
+// resolveApp figures out what App an integer is referring to: it treats 0 as
+// the current app, then uses the integer as-is if it is an available App, then
+// tries to perform a slot lookup.
+func (cx *EvalContext) resolveApp(ref uint64) (aid basics.AppIndex, err error) {
+ if cx.Proto.AppForbidLowResources {
+ defer func() {
+ if aid <= lastForbiddenResource && err == nil {
+ err = fmt.Errorf("low App lookup %d", aid)
}
+ }()
+ }
+
+ if ref == 0 || ref == uint64(cx.appID) {
+ return cx.appID, nil
+ }
+ aid = basics.AppIndex(ref)
+ if cx.availableApp(aid) {
+ return aid, nil
+ }
+
+ // Allow use of indexes, but this comes last so that clear advice can be
+ // given to anyone who cares about semantics in the first few rounds of
+ // a new network - don't use indexes for references, use the App ID
+ if ref <= uint64(len(cx.txn.Txn.ForeignApps)) {
+ return basics.AppIndex(cx.txn.Txn.ForeignApps[ref-1]), nil
+ }
+ return 0, fmt.Errorf("unavailable App %d", ref)
+}
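+
+// For example, in app 888 with txn.ForeignApps = [901]: resolveApp(0) and
+// resolveApp(888) yield the current app, resolveApp(901) succeeds because the
+// app is available, and resolveApp(1) falls back to the slot lookup, also
+// yielding 901.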
+
+// localsReference has the main job of resolving the account (as bytes or u64)
+// and the App, taking access rules into account. It has the funny side job of
+// also reporting which "slot" the address appears in, if it is in txn.Accounts
+// (or is the Sender, which yields 0). But it only needs to do this funny side
+// job in certain old versions that need the slot index while doing a lookup.
+func (cx *EvalContext) localsReference(account stackValue, ref uint64) (basics.Address, basics.AppIndex, uint64, error) {
+ if cx.version >= sharedResourcesVersion {
+ addr, _, err := cx.resolveAccount(account)
+ if err != nil {
+ return basics.Address{}, 0, 0, err
}
- // or was created in group
- if cx.version >= createdResourcesVersion {
- for _, assetID := range cx.available.asas {
- if assetID == basics.AssetIndex(ref) {
- return assetID, nil
- }
+ aid, err := cx.resolveApp(ref)
+ if err == nil {
+ if cx.allowsLocals(addr, aid) {
+ return addr, aid, 0, nil // >v9 caller doesn't care about slot
}
}
- // Allow use of indexes, but this comes last so that clear advice can be
- // given to anyone who cares about semantics in the first few rounds of
- // a new network - don't use indexes for references, use the asa ID.
+
+ // Do an extra check to give a better error. The app is definitely
+ // available. If the addr is too, then the trouble is they must have
+	// come from different transactions, and the LOCALS are the problem.
+
+ acctOK := cx.availableAccount(addr)
+ switch {
+ case err != nil && acctOK:
+		// do nothing, err already contains the App-specific problem
+ case err == nil && acctOK:
+ // although both are available, the LOCALS are not
+ err = fmt.Errorf("unavailable Local State %s x %d", addr, aid)
+ case err != nil && !acctOK:
+ err = fmt.Errorf("unavailable Account %s, %w", addr, err)
+ case err == nil && !acctOK:
+ err = fmt.Errorf("unavailable Account %s", addr)
+ }
+
+ return basics.Address{}, 0, 0, err
+ }
+
+ // Pre group resource sharing, the rule is just that account and app are
+ // each available.
+ addr, addrIdx, err := cx.accountReference(account)
+ if err != nil {
+ return basics.Address{}, 0, 0, err
+ }
+ app, err := cx.appReference(ref, false)
+ if err != nil {
+ return basics.Address{}, 0, 0, err
+ }
+ return addr, app, addrIdx, nil
+}
+
+func (cx *EvalContext) assetReference(ref uint64, foreign bool) (aid basics.AssetIndex, err error) {
+ if cx.version >= directRefEnabledVersion {
+ return cx.resolveAsset(ref)
+ }
+
+ // resolveAsset is already similarly protected (and must be, since it is
+ // called independently)
+ if cx.Proto.AppForbidLowResources {
+ defer func() {
+ if aid <= lastForbiddenResource && err == nil {
+ err = fmt.Errorf("low Asset lookup %d", aid)
+ }
+ }()
+ }
+	// Old rules, pre directRefEnabledVersion, when a ref had to be a slot for
+	// some opcodes, and an ID for others.
+ if foreign {
+ // In old versions, a foreign reference must be an index in ForeignAssets
if ref < uint64(len(cx.txn.Txn.ForeignAssets)) {
return basics.AssetIndex(cx.txn.Txn.ForeignAssets[ref]), nil
}
- } else {
- // Old rules
- if foreign {
- // In old versions, a foreign reference must be an index in ForeignAssets
- if ref < uint64(len(cx.txn.Txn.ForeignAssets)) {
- return basics.AssetIndex(cx.txn.Txn.ForeignAssets[ref]), nil
+ return 0, fmt.Errorf("Asset index %d beyond txn.ForeignAssets", ref)
+ }
+ // Otherwise it's direct
+ return basics.AssetIndex(ref), nil
+}
+
+const lastForbiddenResource = 255
+
+// resolveAsset figures out what Asset an integer is referring to: it uses the
+// integer as-is if it is an available Asset, then tries to perform a slot
+// lookup.
+func (cx *EvalContext) resolveAsset(ref uint64) (aid basics.AssetIndex, err error) {
+ if cx.Proto.AppForbidLowResources {
+ defer func() {
+ if aid <= lastForbiddenResource && err == nil {
+ err = fmt.Errorf("low Asset lookup %d", aid)
+ }
+ }()
+ }
+ aid = basics.AssetIndex(ref)
+ if cx.availableAsset(aid) {
+ return aid, nil
+ }
+
+ // Allow use of indexes, but this comes last so that clear advice can be
+ // given to anyone who cares about semantics in the first few rounds of
+ // a new network - don't use indexes for references, use the Asset ID
+ if ref < uint64(len(cx.txn.Txn.ForeignAssets)) {
+ return basics.AssetIndex(cx.txn.Txn.ForeignAssets[ref]), nil
+ }
+ return 0, fmt.Errorf("unavailable Asset %d", ref)
+}
+
+func (cx *EvalContext) holdingReference(account stackValue, ref uint64) (basics.Address, basics.AssetIndex, error) {
+ if cx.version >= sharedResourcesVersion {
+ addr, _, err := cx.resolveAccount(account)
+ if err != nil {
+ return basics.Address{}, 0, err
+ }
+ aid, err := cx.resolveAsset(ref)
+ if err == nil {
+ if cx.allowsHolding(addr, aid) {
+ return addr, aid, nil
}
- } else {
- // Otherwise it's direct
- return basics.AssetIndex(ref), nil
}
+
+ // Do an extra check to give a better error. The asset is definitely
+ // available. If the addr is too, then the trouble is they must have
+ // come from different transactions, and the HOLDING is the problem.
+
+ acctOK := cx.availableAccount(addr)
+ switch {
+ case err != nil && acctOK:
+		// do nothing, err already contains the Asset-specific problem
+ case err == nil && acctOK:
+ // although both are available, the HOLDING is not
+ err = fmt.Errorf("unavailable Holding %s x %d", addr, aid)
+ case err != nil && !acctOK:
+ err = fmt.Errorf("unavailable Account %s, %w", addr, err)
+ case err == nil && !acctOK:
+ err = fmt.Errorf("unavailable Account %s", addr)
+ }
+ return basics.Address{}, 0, err
}
- return basics.AssetIndex(0), fmt.Errorf("invalid Asset reference %d", ref)
+ // Pre group resource sharing, the rule is just that account and asset are
+ // each available.
+ addr, _, err := cx.accountReference(account)
+ if err != nil {
+ return basics.Address{}, 0, err
+ }
+ asset, err := cx.assetReference(ref, false)
+ if err != nil {
+ return basics.Address{}, 0, err
+ }
+ return addr, asset, nil
}
func opAssetHoldingGet(cx *EvalContext) error {
@@ -4487,12 +4948,7 @@ func opAssetHoldingGet(cx *EvalContext) error {
return fmt.Errorf("invalid asset_holding_get field %d", holdingField)
}
- addr, _, err := cx.accountReference(cx.stack[prev])
- if err != nil {
- return err
- }
-
- asset, err := asaReference(cx, cx.stack[last].Uint, false)
+ addr, asset, err := cx.holdingReference(cx.stack[prev], cx.stack[last].Uint)
if err != nil {
return err
}
@@ -4500,7 +4956,7 @@ func opAssetHoldingGet(cx *EvalContext) error {
var exist uint64 = 0
var value stackValue
if holding, err := cx.Ledger.AssetHolding(addr, asset); err == nil {
- // the holding exist, read the value
+ // the holding exists, read the value
exist = 1
value, err = cx.assetHoldingToValue(&holding, fs)
if err != nil {
@@ -4522,7 +4978,7 @@ func opAssetParamsGet(cx *EvalContext) error {
return fmt.Errorf("invalid asset_params_get field %d", paramField)
}
- asset, err := asaReference(cx, cx.stack[last].Uint, true)
+ asset, err := cx.assetReference(cx.stack[last].Uint, true)
if err != nil {
return err
}
@@ -4552,7 +5008,7 @@ func opAppParamsGet(cx *EvalContext) error {
return fmt.Errorf("invalid app_params_get field %d", paramField)
}
- app, err := appReference(cx, cx.stack[last].Uint, true)
+ app, err := cx.appReference(cx.stack[last].Uint, true)
if err != nil {
return err
}
@@ -4639,13 +5095,13 @@ func opAcctParamsGet(cx *EvalContext) error {
func opLog(cx *EvalContext) error {
last := len(cx.stack) - 1
- if len(cx.txn.EvalDelta.Logs) >= maxLogCalls {
- return fmt.Errorf("too many log calls in program. up to %d is allowed", maxLogCalls)
+ if uint64(len(cx.txn.EvalDelta.Logs)) >= cx.MaxLogCalls {
+ return fmt.Errorf("too many log calls in program. up to %d is allowed", cx.MaxLogCalls)
}
log := cx.stack[last]
cx.logSize += len(log.Bytes)
- if cx.logSize > maxLogSize {
- return fmt.Errorf("program logs too large. %d bytes > %d bytes limit", cx.logSize, maxLogSize)
+ if uint64(cx.logSize) > cx.MaxLogSize {
+ return fmt.Errorf("program logs too large. %d bytes > %d bytes limit", cx.logSize, cx.MaxLogSize)
}
cx.txn.EvalDelta.Logs = append(cx.txn.EvalDelta.Logs, string(log.Bytes))
cx.stack = cx.stack[:last]
@@ -4726,76 +5182,95 @@ func opItxnNext(cx *EvalContext) error {
return addInnerTxn(cx)
}
-// availableAccount is used instead of accountReference for more recent opcodes
-// that don't need (or want!) to allow low numbers to represent the account at
-// that index in Accounts array.
-func (cx *EvalContext) availableAccount(sv stackValue) (basics.Address, error) {
- if sv.argType() != StackBytes || len(sv.Bytes) != crypto.DigestSize {
- return basics.Address{}, fmt.Errorf("not an address")
- }
-
- addr, _, err := cx.accountReference(sv)
- return addr, err
-}
-
-// availableAsset is used instead of asaReference for more recent opcodes that
-// don't need (or want!) to allow low numbers to represent the asset at that
-// index in ForeignAssets array.
-func (cx *EvalContext) availableAsset(sv stackValue) (basics.AssetIndex, error) {
+// assignAsset is used to convert a stackValue to a uint64 assetIndex, reporting
+// any errors due to availability rules or type checking.
+func (cx *EvalContext) assignAsset(sv stackValue) (basics.AssetIndex, error) {
uint, err := sv.uint()
if err != nil {
- return basics.AssetIndex(0), err
+ return 0, err
}
aid := basics.AssetIndex(uint)
+ if cx.availableAsset(aid) {
+ return aid, nil
+ }
+
+ return 0, fmt.Errorf("unavailable Asset %d", aid)
+}
+
+// availableAsset determines whether an asset is "available". Before
+// sharedResourcesVersion, an asset had to be available for asset param
+// lookups, asset holding lookups, and asset id assignments to inner
+// transactions. After sharedResourcesVersion, the distinction must be more fine
+// grained. It must be available for asset param lookups, or use in an asset
+// transaction (axfer,acfg,afrz), but not for holding lookups or assignments to
+// an inner static array.
+func (cx *EvalContext) availableAsset(aid basics.AssetIndex) bool {
// Ensure that aid is in Foreign Assets
for _, assetID := range cx.txn.Txn.ForeignAssets {
if assetID == aid {
- return aid, nil
+ return true
}
}
// or was created in group
if cx.version >= createdResourcesVersion {
- for _, assetID := range cx.available.asas {
- if assetID == aid {
- return aid, nil
- }
+ if _, ok := cx.available.createdAsas[aid]; ok {
+ return true
}
}
- return basics.AssetIndex(0), fmt.Errorf("invalid Asset reference %d", aid)
+ // or some other txn mentioned it
+ if cx.version >= sharedResourcesVersion {
+ if _, ok := cx.available.sharedAsas[aid]; ok {
+ return true
+ }
+ }
+
+ return false
}
-// availableApp is used instead of appReference for more recent (stateful)
-// opcodes that don't need (or want!) to allow low numbers to represent the app
-// at that index in ForeignApps array.
-func (cx *EvalContext) availableApp(sv stackValue) (basics.AppIndex, error) {
+// assignApp is used to convert a stackValue to a uint64 appIndex, reporting
+// any errors due to availability rules or type checking.
+func (cx *EvalContext) assignApp(sv stackValue) (basics.AppIndex, error) {
uint, err := sv.uint()
if err != nil {
- return basics.AppIndex(0), err
+ return 0, err
}
aid := basics.AppIndex(uint)
+ if cx.availableApp(aid) {
+ return aid, nil
+ }
+
+ return 0, fmt.Errorf("unavailable App %d", aid)
+}
+
+func (cx *EvalContext) availableApp(aid basics.AppIndex) bool {
// Ensure that aid is in Foreign Apps
for _, appID := range cx.txn.Txn.ForeignApps {
if appID == aid {
- return aid, nil
+ return true
}
}
// or was created in group
if cx.version >= createdResourcesVersion {
- for _, appID := range cx.available.apps {
- if appID == aid {
- return aid, nil
- }
+ if _, ok := cx.available.createdApps[aid]; ok {
+ return true
}
}
// Or, it can be the current app
if cx.appID == aid {
- return aid, nil
+ return true
}
- return 0, fmt.Errorf("invalid App reference %d", aid)
+ // or some other txn mentioned it
+ if cx.version >= sharedResourcesVersion {
+ if _, ok := cx.available.sharedApps[aid]; ok {
+ return true
+ }
+ }
+
+ return false
}
func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *transactions.Transaction) (err error) {
@@ -4829,7 +5304,7 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
return fmt.Errorf("%d is not a valid TypeEnum", i)
}
case Sender:
- txn.Sender, err = cx.availableAccount(sv)
+ txn.Sender, err = cx.assignAccount(sv)
case Fee:
txn.Fee.Raw, err = sv.uint()
// FirstValid, LastValid unsettable: little motivation (maybe a app call
@@ -4879,25 +5354,25 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
// Payment
case Receiver:
- txn.Receiver, err = cx.availableAccount(sv)
+ txn.Receiver, err = cx.assignAccount(sv)
case Amount:
txn.Amount.Raw, err = sv.uint()
case CloseRemainderTo:
- txn.CloseRemainderTo, err = cx.availableAccount(sv)
+ txn.CloseRemainderTo, err = cx.assignAccount(sv)
// AssetTransfer
case XferAsset:
- txn.XferAsset, err = cx.availableAsset(sv)
+ txn.XferAsset, err = cx.assignAsset(sv)
case AssetAmount:
txn.AssetAmount, err = sv.uint()
case AssetSender:
- txn.AssetSender, err = cx.availableAccount(sv)
+ txn.AssetSender, err = cx.assignAccount(sv)
case AssetReceiver:
- txn.AssetReceiver, err = cx.availableAccount(sv)
+ txn.AssetReceiver, err = cx.assignAccount(sv)
case AssetCloseTo:
- txn.AssetCloseTo, err = cx.availableAccount(sv)
+ txn.AssetCloseTo, err = cx.assignAccount(sv)
// AssetConfig
case ConfigAsset:
- txn.ConfigAsset, err = cx.availableAsset(sv)
+ txn.ConfigAsset, err = cx.assignAsset(sv)
case ConfigAssetTotal:
txn.AssetParams.Total, err = sv.uint()
case ConfigAssetDecimals:
@@ -4933,15 +5408,15 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
txn.AssetParams.Clawback, err = sv.address()
// Freeze
case FreezeAsset:
- txn.FreezeAsset, err = cx.availableAsset(sv)
+ txn.FreezeAsset, err = cx.assignAsset(sv)
case FreezeAssetAccount:
- txn.FreezeAccount, err = cx.availableAccount(sv)
+ txn.FreezeAccount, err = cx.assignAccount(sv)
case FreezeAssetFrozen:
txn.AssetFrozen, err = sv.bool()
// ApplicationCall
case ApplicationID:
- txn.ApplicationID, err = cx.availableApp(sv)
+ txn.ApplicationID, err = cx.assignApp(sv)
case OnCompletion:
var onc uint64
onc, err = sv.uintMaxed(uint64(transactions.DeleteApplicationOC))
@@ -4965,7 +5440,7 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
txn.ApplicationArgs = append(txn.ApplicationArgs, new)
case Accounts:
var new basics.Address
- new, err = cx.availableAccount(sv)
+ new, err = cx.assignAccount(sv)
if err != nil {
return err
}
@@ -5001,7 +5476,7 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
}
case Assets:
var new basics.AssetIndex
- new, err = cx.availableAsset(sv)
+ new, err = cx.assignAsset(sv)
if err != nil {
return err
}
@@ -5011,7 +5486,7 @@ func (cx *EvalContext) stackIntoTxnField(sv stackValue, fs *txnFieldSpec, txn *t
txn.ForeignAssets = append(txn.ForeignAssets, new)
case Applications:
var new basics.AppIndex
- new, err = cx.availableApp(sv)
+ new, err = cx.assignApp(sv)
if err != nil {
return err
}
@@ -5120,6 +5595,8 @@ func opItxnSubmit(cx *EvalContext) (err error) {
return err
}
+ var calledVersion uint64
+
// Disallow reentrancy, limit inner app call depth, and do version checks
if cx.subtxns[itx].Txn.Type == protocol.ApplicationCallTx {
if cx.appID == cx.subtxns[itx].Txn.ApplicationID {
@@ -5150,13 +5627,13 @@ func opItxnSubmit(cx *EvalContext) (err error) {
}
// Can't call old versions in inner apps.
- v, _, err := transactions.ProgramVersion(program)
+ calledVersion, _, err = transactions.ProgramVersion(program)
if err != nil {
return err
}
- if v < cx.Proto.MinInnerApplVersion {
+ if calledVersion < cx.Proto.MinInnerApplVersion {
return fmt.Errorf("inner app call with version v%d < v%d",
- v, cx.Proto.MinInnerApplVersion)
+ calledVersion, cx.Proto.MinInnerApplVersion)
}
// Don't allow opt-in if the CSP is not runnable as an inner.
@@ -5180,7 +5657,18 @@ func opItxnSubmit(cx *EvalContext) (err error) {
csv, cx.Proto.MinInnerApplVersion)
}
}
+ }
+ // Starting in v9, it's possible for apps to create transactions that
+ // should not be allowed to run, because they require access to
+ // resources that the caller does not have. This can only happen for
+ // Holdings and Local States. The caller might have access to the
+ // account and the asa or app, but not the holding or locals, because
+ // the caller gained access to the two top resources by group sharing
+ // from two different transactions.
+ err = cx.allows(&cx.subtxns[itx].Txn, calledVersion)
+ if err != nil {
+ return err
}
if isGroup {
@@ -5212,7 +5700,7 @@ func opItxnSubmit(cx *EvalContext) (err error) {
ep.Tracer.BeforeTxnGroup(ep)
// Ensure we update the tracer before exiting
defer func() {
- ep.Tracer.AfterTxnGroup(ep, err)
+ ep.Tracer.AfterTxnGroup(ep, nil, err)
}()
}
@@ -5343,8 +5831,8 @@ func opBlock(cx *EvalContext) error {
}
}
-// PcDetails return PC and disassembled instructions at PC up to 2 opcodes back
-func (cx *EvalContext) PcDetails() (pc int, dis string) {
+// pcDetails returns the PC and disassembled instructions at the PC, up to 2 opcodes back
+func (cx *EvalContext) pcDetails() (pc int, dis string) {
const maxNumAdditionalOpcodes = 2
text, ds, err := disassembleInstrumented(cx.program, nil)
if err != nil {
@@ -5368,7 +5856,7 @@ func (cx *EvalContext) PcDetails() (pc int, dis string) {
break
}
}
- return cx.pc, dis
+ return cx.pc, strings.ReplaceAll(strings.TrimSuffix(dis, "\n"), "\n", "; ")
}
func base64Decode(encoded []byte, encoding *base64.Encoding) ([]byte, error) {
diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go
index 2886b57a5..c72236a87 100644
--- a/data/transactions/logic/evalAppTxn_test.go
+++ b/data/transactions/logic/evalAppTxn_test.go
@@ -19,6 +19,7 @@ package logic_test
import (
"encoding/hex"
"fmt"
+ "strconv"
"strings"
"testing"
@@ -104,15 +105,16 @@ func TestFieldTypes(t *testing.T) {
t.Parallel()
ep, _, _ := MakeSampleEnv()
- TestApp(t, "itxn_begin; byte \"pay\"; itxn_field Sender;", ep, "not an address")
+ // Use NoTrack to skip assembly errors
+ TestApp(t, NoTrack("itxn_begin; byte \"pay\"; itxn_field Sender;"), ep, "not an address")
TestApp(t, NoTrack("itxn_begin; int 7; itxn_field Receiver;"), ep, "not an address")
- TestApp(t, "itxn_begin; byte \"\"; itxn_field CloseRemainderTo;", ep, "not an address")
- TestApp(t, "itxn_begin; byte \"\"; itxn_field AssetSender;", ep, "not an address")
+ TestApp(t, NoTrack("itxn_begin; byte \"\"; itxn_field CloseRemainderTo;"), ep, "not an address")
+ TestApp(t, NoTrack("itxn_begin; byte \"\"; itxn_field AssetSender;"), ep, "not an address")
	// can't really tell if it's an address, so 32 bytes gets further
- TestApp(t, "itxn_begin; byte \"01234567890123456789012345678901\"; itxn_field AssetReceiver;",
+ TestApp(t, "itxn_begin; byte \"01234567890123456789012345678901\"; itxn_field AssetReceiver; int 1",
ep, "invalid Account reference")
// but a b32 string rep is not an account
- TestApp(t, "itxn_begin; byte \"GAYTEMZUGU3DOOBZGAYTEMZUGU3DOOBZGAYTEMZUGU3DOOBZGAYZIZD42E\"; itxn_field AssetCloseTo;",
+ TestApp(t, NoTrack("itxn_begin; byte \"GAYTEMZUGU3DOOBZGAYTEMZUGU3DOOBZGAYTEMZUGU3DOOBZGAYZIZD42E\"; itxn_field AssetCloseTo;"),
ep, "not an address")
TestApp(t, NoTrack("itxn_begin; byte \"pay\"; itxn_field Fee;"), ep, "not a uint64")
@@ -145,49 +147,60 @@ func TestAppPay(t *testing.T) {
int 1
`
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- TestApp(t, "txn Sender; balance; int 0; ==;", ep)
- TestApp(t, "txn Sender; txn Accounts 1; int 100"+pay, ep, "unauthorized")
- TestApp(t, "global CurrentApplicationAddress; txn Accounts 1; int 100"+pay, ep,
- "insufficient balance")
- ledger.NewAccount(appAddr(888), 1000000)
-
- // You might NewExpect this to fail because of min balance issue
- // (receiving account only gets 100 microalgos). It does not fail at
- // this level, instead, we must be certain that the existing min
- // balance check in eval.transaction() properly notices and fails
- // the transaction later. This fits with the model that we check
- // min balances once at the end of each "top-level" transaction.
- TestApp(t, "global CurrentApplicationAddress; txn Accounts 1; int 100"+pay, ep)
-
- // 100 of 1000000 spent, plus MinTxnFee in our fake protocol is 1001
- TestApp(t, "global CurrentApplicationAddress; balance; int 998899; ==", ep)
- TestApp(t, "txn Receiver; balance; int 100; ==", ep)
-
- close := `
+ // v5 added inners
+ TestLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ test := func(source string, problem ...string) {
+ TestApp(t, source, ep, problem...)
+ }
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ test("txn Sender; balance; int 0; ==;")
+ test("txn Sender; txn Accounts 1; int 100"+pay, "unauthorized")
+ test("global CurrentApplicationAddress; txn Accounts 1; int 100"+pay,
+ "insufficient balance")
+ ledger.NewAccount(appAddr(888), 1000000)
+
+		// You might expect this to fail because of a min balance issue
+ // (receiving account only gets 100 microalgos). It does not fail at
+ // this level, instead, we must be certain that the existing min
+ // balance check in eval.transaction() properly notices and fails
+ // the transaction later. This fits with the model that we check
+ // min balances once at the end of each "top-level" transaction.
+ test("global CurrentApplicationAddress; txn Accounts 1; int 100" + pay)
+
+ // 100 of 1000000 spent, plus MinTxnFee in our fake protocol is 1001
+ test("global CurrentApplicationAddress; balance; int 998899; ==")
+ test("txn Receiver; balance; int 100; ==")
+
+ close := `
itxn_begin
int pay; itxn_field TypeEnum
txn Receiver; itxn_field CloseRemainderTo
itxn_submit
int 1
`
- TestApp(t, close, ep)
- TestApp(t, "global CurrentApplicationAddress; balance; !", ep)
- // Receiver got most of the algos (except 1001 for fee)
- TestApp(t, "txn Receiver; balance; int 997998; ==", ep)
+ test(close)
+ test("global CurrentApplicationAddress; balance; !")
+ // Receiver got most of the algos (except 1001 for fee)
+ test("txn Receiver; balance; int 997998; ==")
+ })
}
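// For reference, the balances asserted above follow from the fake protocol's
// MinTxnFee of 1001 (MakeTestProto): after the inner pay, the app account
// holds 1_000_000 - 100 - 1_001 = 998_899, and after the close-out the
// Receiver holds 100 + (998_899 - 1_001) = 997_998.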
func TestAppAssetOptIn(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, tx, ledger := MakeSampleEnv()
- // Establish 888 as the app id, and fund it.
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- ledger.NewAccount(basics.AppIndex(888).Address(), 200000)
+ // v5 added inners
+ TestLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ test := func(source string, problem ...string) {
+ t.Helper()
+ TestApp(t, source, ep, problem...)
+ }
- axfer := `
+ // Establish 888 as the app id, and fund it.
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ ledger.NewAccount(basics.AppIndex(888).Address(), 200000)
+
+ axfer := `
itxn_begin
int axfer; itxn_field TypeEnum;
int 25; itxn_field XferAsset;
@@ -196,10 +209,10 @@ txn Sender; itxn_field AssetReceiver;
itxn_submit
int 1
`
- TestApp(t, axfer, ep, "invalid Asset reference")
- tx.ForeignAssets = append(tx.ForeignAssets, 25)
- TestApp(t, axfer, ep, "not opted in") // app account not opted in
- optin := `
+ test(axfer, "unavailable Asset 25")
+ tx.ForeignAssets = append(tx.ForeignAssets, 25)
+ test(axfer, "not opted in") // app account not opted in
+ optin := `
itxn_begin
int axfer; itxn_field TypeEnum;
int 25; itxn_field XferAsset;
@@ -208,25 +221,25 @@ global CurrentApplicationAddress; itxn_field AssetReceiver;
itxn_submit
int 1
`
- TestApp(t, optin, ep, "does not exist")
- // Asset 25
- ledger.NewAsset(tx.Sender, 25, basics.AssetParams{
- Total: 10,
- UnitName: "x",
- AssetName: "Cross",
- })
- TestApp(t, optin, ep)
+ test(optin, "does not exist")
+ // Asset 25
+ ledger.NewAsset(tx.Sender, 25, basics.AssetParams{
+ Total: 10,
+ UnitName: "x",
+ AssetName: "Cross",
+ })
+ test(optin)
- TestApp(t, axfer, ep, "insufficient balance") // opted in, but balance=0
+ test(axfer, "insufficient balance") // opted in, but balance=0
- // Fund the app account with the asset
- ledger.NewHolding(basics.AppIndex(888).Address(), 25, 5, false)
- TestApp(t, axfer, ep)
- TestApp(t, axfer, ep)
- TestApp(t, axfer, ep, "insufficient balance") // balance = 1, tried to move 2)
- TestApp(t, "global CurrentApplicationAddress; int 25; asset_holding_get AssetBalance; assert; int 1; ==", ep)
+ // Fund the app account with the asset
+ ledger.NewHolding(basics.AppIndex(888).Address(), 25, 5, false)
+ test(axfer)
+ test(axfer)
+ test(axfer, "insufficient balance") // balance = 1, tried to move 2)
+ test("global CurrentApplicationAddress; int 25; asset_holding_get AssetBalance; assert; int 1; ==")
- close := `
+ close := `
itxn_begin
int axfer; itxn_field TypeEnum;
int 25; itxn_field XferAsset;
@@ -236,8 +249,9 @@ txn Sender; itxn_field AssetCloseTo;
itxn_submit
int 1
`
- TestApp(t, close, ep)
- TestApp(t, "global CurrentApplicationAddress; int 25; asset_holding_get AssetBalance; !; assert; !", ep)
+ test(close)
+ test("global CurrentApplicationAddress; int 25; asset_holding_get AssetBalance; !; assert; !")
+ })
}
func TestRekeyPay(t *testing.T) {
@@ -254,16 +268,17 @@ func TestRekeyPay(t *testing.T) {
itxn_submit
`
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- TestApp(t, "txn Sender; balance; int 0; ==;", ep)
- TestApp(t, "txn Sender; txn Accounts 1; int 100"+pay, ep, "unauthorized")
- ledger.NewAccount(tx.Sender, 120+ep.Proto.MinTxnFee)
- ledger.Rekey(tx.Sender, basics.AppIndex(888).Address())
- TestApp(t, "txn Sender; txn Accounts 1; int 100"+pay+"; int 1", ep)
- // Note that the Sender would fail min balance check if we did it here.
- // It seems proper to wait until end of txn though.
- // See explanation in cowRoundState's Perform()
+ // v5 added inners
+ TestLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ TestApp(t, "txn Sender; txn Accounts 1; int 100"+pay, ep, "unauthorized")
+ ledger.NewAccount(tx.Sender, 120+ep.Proto.MinTxnFee)
+ ledger.Rekey(tx.Sender, basics.AppIndex(888).Address())
+ TestApp(t, "txn Sender; txn Accounts 1; int 100"+pay+"; int 1", ep)
+ // Note that the Sender would fail min balance check if we did it here.
+ // It seems proper to wait until end of txn though.
+ // See explanation in cowRoundState's Perform()
+ })
}
func TestRekeyBack(t *testing.T) {
@@ -282,15 +297,17 @@ func TestRekeyBack(t *testing.T) {
itxn_submit
`
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- TestApp(t, "txn Sender; balance; int 0; ==;", ep)
- TestApp(t, "txn Sender; txn Accounts 1; int 100"+payAndUnkey, ep, "unauthorized")
- ledger.NewAccount(tx.Sender, 120+3*ep.Proto.MinTxnFee)
- ledger.Rekey(tx.Sender, basics.AppIndex(888).Address())
- TestApp(t, "txn Sender; txn Accounts 1; int 100"+payAndUnkey+"; int 1", ep)
- // now rekeyed back to original
- TestApp(t, "txn Sender; txn Accounts 1; int 100"+payAndUnkey, ep, "unauthorized")
+ // v6 added inner rekey
+ TestLogicRange(t, 6, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ TestApp(t, "txn Sender; balance; int 0; ==;", ep)
+ TestApp(t, "txn Sender; txn Accounts 1; int 100"+payAndUnkey, ep, "unauthorized")
+ ledger.NewAccount(tx.Sender, 120+3*ep.Proto.MinTxnFee)
+ ledger.Rekey(tx.Sender, basics.AppIndex(888).Address())
+ TestApp(t, "txn Sender; txn Accounts 1; int 100"+payAndUnkey+"; int 1", ep)
+ // now rekeyed back to original
+ TestApp(t, "txn Sender; txn Accounts 1; int 100"+payAndUnkey, ep, "unauthorized")
+ })
}
func TestDefaultSender(t *testing.T) {
@@ -306,13 +323,15 @@ func TestDefaultSender(t *testing.T) {
itxn_submit
`
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- tx.Accounts = append(tx.Accounts, appAddr(888))
- TestApp(t, "txn Accounts 1; int 100"+pay, ep, "insufficient balance")
- ledger.NewAccount(appAddr(888), 1000000)
- TestApp(t, "txn Accounts 1; int 100"+pay+"int 1", ep)
- TestApp(t, "global CurrentApplicationAddress; balance; int 998899; ==", ep)
+ // v5 added inners
+ TestLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ tx.Accounts = append(tx.Accounts, appAddr(888))
+ TestApp(t, "txn Accounts 1; int 100"+pay, ep, "insufficient balance")
+ ledger.NewAccount(appAddr(888), 1000000)
+ TestApp(t, "txn Accounts 1; int 100"+pay+"int 1", ep)
+ TestApp(t, "global CurrentApplicationAddress; balance; int 998899; ==", ep)
+ })
}
func TestAppAxfer(t *testing.T) {
@@ -331,36 +350,42 @@ func TestAppAxfer(t *testing.T) {
itxn_submit
`
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- ledger.NewAsset(tx.Receiver, 777, basics.AssetParams{}) // not in foreign-assets of sample
- ledger.NewAsset(tx.Receiver, 77, basics.AssetParams{}) // in foreign-assets of sample
- TestApp(t, "txn Sender; int 777; asset_holding_get AssetBalance; assert; int 0; ==;", ep,
- "invalid Asset reference") // 777 not in foreign-assets
- TestApp(t, "txn Sender; int 77; asset_holding_get AssetBalance; assert; int 0; ==;", ep,
- "assert failed") // because Sender not opted-in
- TestApp(t, "global CurrentApplicationAddress; int 77; asset_holding_get AssetBalance; assert; int 0; ==;", ep,
- "assert failed") // app account not opted in
-
- ledger.NewAccount(appAddr(888), 10000) // plenty for fees
- ledger.NewHolding(appAddr(888), 77, 3000, false)
- TestApp(t, "global CurrentApplicationAddress; int 77; asset_holding_get AssetBalance; assert; int 3000; ==;", ep)
-
- TestApp(t, "txn Sender; txn Accounts 1; int 100"+axfer, ep, "unauthorized")
- TestApp(t, "global CurrentApplicationAddress; txn Accounts 0; int 100"+axfer, ep,
- fmt.Sprintf("Receiver (%s) not opted in", tx.Sender)) // txn.Sender (receiver of the axfer) isn't opted in
- TestApp(t, "global CurrentApplicationAddress; txn Accounts 1; int 100000"+axfer, ep,
- "insufficient balance")
-
- // Temporarily remove from ForeignAssets to ensure App Account
- // doesn't get some sort of free pass to send arbitrary assets.
- save := tx.ForeignAssets
- tx.ForeignAssets = []basics.AssetIndex{6, 10}
- TestApp(t, "global CurrentApplicationAddress; txn Accounts 1; int 100000"+axfer, ep,
- "invalid Asset reference 77")
- tx.ForeignAssets = save
+ // v5 added inners
+ TestLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ test := func(source string, problem ...string) {
+ t.Helper()
+ TestApp(t, source, ep, problem...)
+ }
- noid := `
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ ledger.NewAsset(tx.Receiver, 777, basics.AssetParams{}) // not in foreign-assets of sample
+ ledger.NewAsset(tx.Receiver, 77, basics.AssetParams{}) // in foreign-assets of sample
+ test("txn Sender; int 777; asset_holding_get AssetBalance; assert; int 0; ==;",
+ "unavailable Asset 777") // 777 not in foreign-assets
+ test("txn Sender; int 77; asset_holding_get AssetBalance; assert; int 0; ==;",
+ "assert failed") // because Sender not opted-in
+ test("global CurrentApplicationAddress; int 77; asset_holding_get AssetBalance; assert; int 0; ==;",
+ "assert failed") // app account not opted in
+
+ ledger.NewAccount(appAddr(888), 10000) // plenty for fees
+ ledger.NewHolding(appAddr(888), 77, 3000, false)
+ test("global CurrentApplicationAddress; int 77; asset_holding_get AssetBalance; assert; int 3000; ==;")
+
+ test("txn Sender; txn Accounts 1; int 100"+axfer, "unauthorized")
+ test("global CurrentApplicationAddress; txn Accounts 0; int 100"+axfer,
+ fmt.Sprintf("Receiver (%s) not opted in", tx.Sender)) // txn.Sender (receiver of the axfer) isn't opted in
+ test("global CurrentApplicationAddress; txn Accounts 1; int 100000"+axfer,
+ "insufficient balance")
+
+ // Temporarily remove from ForeignAssets to ensure App Account
+ // doesn't get some sort of free pass to send arbitrary assets.
+ save := tx.ForeignAssets
+ tx.ForeignAssets = []basics.AssetIndex{6, 10}
+ test("global CurrentApplicationAddress; txn Accounts 1; int 100000"+axfer,
+ "unavailable Asset 77")
+ tx.ForeignAssets = save
+
+ noid := `
itxn_begin
itxn_field AssetAmount
itxn_field AssetReceiver
@@ -369,16 +394,50 @@ func TestAppAxfer(t *testing.T) {
itxn_field TypeEnum
itxn_submit
`
- TestApp(t, "global CurrentApplicationAddress; txn Accounts 1; int 100"+noid+"int 1", ep,
- fmt.Sprintf("Sender (%s) not opted in to 0", appAddr(888)))
- TestApp(t, "global CurrentApplicationAddress; txn Accounts 1; int 100"+axfer+"int 1", ep)
+		// Here, the XferAsset is never set, so it defaults to 0. Therefore
+		// v8 and below had no opportunity to complain about the unavailability
+ // of the implied holding. Of course, there is no 0 asset, so the axfer
+ // is going to fail anyway, but to keep the behavior consistent, v9
+ // allows the zero asset (and zero account) in `requireHolding`.
+ test("global CurrentApplicationAddress; txn Accounts 1; int 100"+noid+"int 1",
+ fmt.Sprintf("Sender (%s) not opted in to 0", appAddr(888)))
+
+ test("global CurrentApplicationAddress; txn Accounts 1; int 100" + axfer + "int 1")
- // 100 of 3000 spent
- TestApp(t, "global CurrentApplicationAddress; int 77; asset_holding_get AssetBalance; assert; int 2900; ==", ep)
- TestApp(t, "txn Accounts 1; int 77; asset_holding_get AssetBalance; assert; int 100; ==", ep)
+ // 100 of 3000 spent
+ test("global CurrentApplicationAddress; int 77; asset_holding_get AssetBalance; assert; int 2900; ==")
+ test("txn Accounts 1; int 77; asset_holding_get AssetBalance; assert; int 100; ==")
+ })
}
+func TestInnerAppl(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ appl := `
+ itxn_begin
+ int appl; itxn_field TypeEnum
+ int 56 // present in ForeignApps of sample txn
+ itxn_field ApplicationID
+ itxn_submit
+ int 1
+`
+
+ // v6 added inner appls
+ TestLogicRange(t, 6, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ // Establish 888 as the app id, and fund it.
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ ledger.NewAccount(basics.AppIndex(888).Address(), 200000)
+
+ ops := TestProg(t, "int 1", 5)
+ ledger.NewApp(basics.Address{0x01}, 56, basics.AppParams{ApprovalProgram: ops.Program})
+ TestApp(t, appl, ep)
+ })
+}
+
+// TestExtraFields tests that inner txn fields may not be set for a
+// different transaction type than the one submitted.
func TestExtraFields(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -396,8 +455,6 @@ func TestExtraFields(t *testing.T) {
ep, tx, ledger := MakeSampleEnv()
ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- TestApp(t, "txn Sender; balance; int 0; ==;", ep)
- TestApp(t, "txn Sender; txn Accounts 1; int 100"+pay, ep, "unauthorized")
TestApp(t, "global CurrentApplicationAddress; txn Accounts 1; int 100"+pay, ep,
"non-zero fields for type axfer")
}
@@ -527,12 +584,12 @@ func TestNumInnerPooled(t *testing.T) {
TestApps(t, []string{short, long}, grp, LogicVersion, ledger)
TestApps(t, []string{long, short}, grp, LogicVersion, ledger)
TestApps(t, []string{long, long}, grp, LogicVersion, ledger,
- NewExpect(1, "too many inner transactions"))
+ Exp(1, "too many inner transactions"))
grp = append(grp, grp[0])
TestApps(t, []string{short, long, long}, grp, LogicVersion, ledger,
- NewExpect(2, "too many inner transactions"))
+ Exp(2, "too many inner transactions"))
TestApps(t, []string{long, long, long}, grp, LogicVersion, ledger,
- NewExpect(1, "too many inner transactions"))
+ Exp(1, "too many inner transactions"))
}
func TestAssetCreate(t *testing.T) {
@@ -556,12 +613,14 @@ func TestAssetCreate(t *testing.T) {
itxn_submit
int 1
`
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- TestApp(t, create, ep, "insufficient balance")
- // Give it enough for fee. Recall that we don't check min balance at this level.
- ledger.NewAccount(appAddr(888), MakeTestProto().MinTxnFee)
- TestApp(t, create, ep)
+ // v5 added inners
+ TestLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ TestApp(t, create, ep, "insufficient balance")
+ // Give it enough for fee. Recall that we don't check min balance at this level.
+ ledger.NewAccount(appAddr(888), MakeTestProto().MinTxnFee)
+ TestApp(t, create, ep)
+ })
}
func TestAssetFreeze(t *testing.T) {
@@ -582,13 +641,14 @@ func TestAssetFreeze(t *testing.T) {
int 5000
==
`
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- // Give it enough for fees. Recall that we don't check min balance at this level.
- ledger.NewAccount(appAddr(888), 12*MakeTestProto().MinTxnFee)
- TestApp(t, create, ep)
+ // v5 added inners
+ TestLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ // Give it enough for fees. Recall that we don't check min balance at this level.
+ ledger.NewAccount(appAddr(888), 12*MakeTestProto().MinTxnFee)
+ TestApp(t, create, ep)
- freeze := `
+ freeze := `
itxn_begin
int afrz ; itxn_field TypeEnum
int 5000 ; itxn_field FreezeAsset
@@ -597,20 +657,21 @@ func TestAssetFreeze(t *testing.T) {
itxn_submit
int 1
`
- TestApp(t, freeze, ep, "invalid Asset reference")
- tx.ForeignAssets = []basics.AssetIndex{basics.AssetIndex(5000)}
- tx.ApplicationArgs = [][]byte{{0x01}}
- TestApp(t, freeze, ep, "does not hold Asset")
- ledger.NewHolding(tx.Receiver, 5000, 55, false)
- TestApp(t, freeze, ep)
- holding, err := ledger.AssetHolding(tx.Receiver, 5000)
- require.NoError(t, err)
- require.Equal(t, true, holding.Frozen)
- tx.ApplicationArgs = [][]byte{{0x00}}
- TestApp(t, freeze, ep)
- holding, err = ledger.AssetHolding(tx.Receiver, 5000)
- require.NoError(t, err)
- require.Equal(t, false, holding.Frozen)
+ TestApp(t, freeze, ep, "unavailable Asset 5000")
+ tx.ForeignAssets = []basics.AssetIndex{basics.AssetIndex(5000)}
+ tx.ApplicationArgs = [][]byte{{0x01}}
+ TestApp(t, freeze, ep, "does not hold Asset")
+ ledger.NewHolding(tx.Receiver, 5000, 55, false)
+ TestApp(t, freeze, ep)
+ holding, err := ledger.AssetHolding(tx.Receiver, 5000)
+ require.NoError(t, err)
+ require.Equal(t, true, holding.Frozen)
+ tx.ApplicationArgs = [][]byte{{0x00}}
+ TestApp(t, freeze, ep)
+ holding, err = ledger.AssetHolding(tx.Receiver, 5000)
+ require.NoError(t, err)
+ require.Equal(t, false, holding.Frozen)
+ })
}
func TestKeyReg(t *testing.T) {
@@ -789,7 +850,7 @@ func TestFieldSetting(t *testing.T) {
TestApp(t, "itxn_begin; int 0; itxn_field Nonparticipation; int 1", ep)
TestApp(t, "itxn_begin; int 1; itxn_field Nonparticipation; int 1", ep)
- TestApp(t, "itxn_begin; int 2; itxn_field Nonparticipation; int 1", ep,
+ TestApp(t, NoTrack("itxn_begin; int 2; itxn_field Nonparticipation; int 1"), ep,
"boolean is neither 1 nor 0")
TestApp(t, "itxn_begin; int 32; bzero; itxn_field RekeyTo; int 1", ep)
@@ -910,8 +971,7 @@ func TestApplCreation(t *testing.T) {
p := "itxn_begin;"
s := "; int 1"
- TestApp(t, p+"int 31; itxn_field ApplicationID"+s, ep,
- "invalid App reference")
+ TestApp(t, p+"int 31; itxn_field ApplicationID"+s, ep, "unavailable App 31")
tx.ForeignApps = append(tx.ForeignApps, 31)
TestApp(t, p+"int 31; itxn_field ApplicationID"+s, ep)
@@ -949,14 +1009,14 @@ func TestApplCreation(t *testing.T) {
"too many foreign accounts")
TestApp(t, p+strings.Repeat("int 621; itxn_field Applications;", 5)+s, ep,
- "invalid App reference")
+ "unavailable App 621")
tx.ForeignApps = append(tx.ForeignApps, basics.AppIndex(621))
TestApp(t, p+strings.Repeat("int 621; itxn_field Applications;", 5)+s, ep)
TestApp(t, p+strings.Repeat("int 621; itxn_field Applications;", 6)+s, ep,
"too many foreign apps")
TestApp(t, p+strings.Repeat("int 621; itxn_field Assets;", 6)+s, ep,
- "invalid Asset reference")
+ "unavailable Asset 621")
tx.ForeignAssets = append(tx.ForeignAssets, basics.AssetIndex(621))
TestApp(t, p+strings.Repeat("int 621; itxn_field Assets;", 6)+s, ep)
TestApp(t, p+strings.Repeat("int 621; itxn_field Assets;", 7)+s, ep,
@@ -1093,68 +1153,72 @@ func TestInnerApplCreate(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- ledger.NewAccount(appAddr(888), 50_000)
+ TestLogicRange(t, 6, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ v := ep.Proto.LogicSigVersion
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ ledger.NewAccount(appAddr(888), 50_000)
- ops := TestProg(t, "int 50", AssemblerMaxVersion)
- approve := "byte 0x" + hex.EncodeToString(ops.Program)
+ ops := TestProg(t, "int 50", v)
+ approve := "byte 0x" + hex.EncodeToString(ops.Program)
- TestApp(t, `
+ test := func(source string, problems ...string) {
+ t.Helper()
+ TestApp(t, source, ep, problems...)
+ }
+
+ test(`
itxn_begin
int appl; itxn_field TypeEnum
-`+approve+`; itxn_field ApprovalProgram
-`+approve+`; itxn_field ClearStateProgram
+` + approve + `; itxn_field ApprovalProgram
+` + approve + `; itxn_field ClearStateProgram
int 1; itxn_field GlobalNumUint
int 2; itxn_field LocalNumByteSlice
int 3; itxn_field LocalNumUint
itxn_submit
int 1
-`, ep)
+`)
- TestApp(t, `
-int 5000; app_params_get AppGlobalNumByteSlice; assert; int 0; ==; assert
-`, ep, "invalid App reference")
+ test("int 5000; app_params_get AppGlobalNumByteSlice; assert; int 0; ==; assert",
+ "unavailable App 5000")
- call := `
+ call := `
itxn_begin
int appl; itxn_field TypeEnum
int 5000; itxn_field ApplicationID
itxn_submit
int 1
`
- // Can't call it either
- TestApp(t, call, ep, "invalid App reference")
+ // Can't call it either
+ test(call, "unavailable App 5000")
- tx.ForeignApps = []basics.AppIndex{basics.AppIndex(5000)}
- TestApp(t, `
+ tx.ForeignApps = []basics.AppIndex{basics.AppIndex(5000)}
+ test(`
int 5000; app_params_get AppGlobalNumByteSlice; assert; int 0; ==; assert
int 5000; app_params_get AppGlobalNumUint; assert; int 1; ==; assert
int 5000; app_params_get AppLocalNumByteSlice; assert; int 2; ==; assert
int 5000; app_params_get AppLocalNumUint; assert; int 3; ==; assert
int 1
-`, ep)
+`)
- // Call it (default OnComplete is NoOp)
- TestApp(t, call, ep)
+ // Call it (default OnComplete is NoOp)
+ test(call)
- TestApp(t, `
+ test(`
itxn_begin
int appl; itxn_field TypeEnum
int DeleteApplication; itxn_field OnCompletion
txn Applications 1; itxn_field ApplicationID
itxn_submit
int 1
-`, ep)
+`)
- // App is gone
- TestApp(t, `
-int 5000; app_params_get AppGlobalNumByteSlice; !; assert; !; assert; int 1
-`, ep)
+ // App is gone
+ test("int 5000; app_params_get AppGlobalNumByteSlice; !; assert; !; assert; int 1")
- // Can't call it either
- TestApp(t, call, ep, "no such app 5000")
+ // Can't call it either
+ test(call, "no app 5000")
+ })
}
func TestCreateOldAppFails(t *testing.T) {
@@ -2661,20 +2725,29 @@ func TestCreateAndUse(t *testing.T) {
itxn_begin
int axfer; itxn_field TypeEnum
itxn CreatedAssetID; itxn_field XferAsset
- txn Accounts 0; itxn_field AssetReceiver
+ txn Sender; itxn_field AssetReceiver
itxn_submit
int 1
`
- // First testing use in axfer
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- ledger.NewAccount(appAddr(888), 4*MakeTestProto().MinTxnFee)
- TestApp(t, axfer, ep)
+ // First testing use in axfer, start at v5 so that the failure is tested
+ TestLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ v := ep.Proto.LogicSigVersion
+ test := func(source string, problems ...string) {
+ t.Helper()
+ TestApp(t, source, ep, problems...)
+ }
- ep.Proto = MakeTestProtoV(CreatedResourcesVersion - 1)
- TestApp(t, axfer, ep, "invalid Asset reference")
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ ledger.NewAccount(appAddr(888), 4*MakeTestProto().MinTxnFee)
+
+ if v < CreatedResourcesVersion {
+ test(axfer, "unavailable Asset")
+ } else {
+ test(axfer)
+ }
+ })
balance := `
itxn_begin
@@ -2707,14 +2780,23 @@ func TestCreateAndUse(t *testing.T) {
int 1
`
- // Now test use in asset balance opcode
- ep, tx, ledger = MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- ledger.NewAccount(appAddr(888), 4*MakeTestProto().MinTxnFee)
- TestApp(t, balance, ep)
+ // Now test use in asset balance opcode, over the same range
+ TestLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ v := ep.Proto.LogicSigVersion
+ test := func(source string, problems ...string) {
+ t.Helper()
+ TestApp(t, source, ep, problems...)
+ }
- ep.Proto = MakeTestProtoV(CreatedResourcesVersion - 1)
- TestApp(t, balance, ep, "invalid Asset reference")
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ ledger.NewAccount(appAddr(888), 4*MakeTestProto().MinTxnFee)
+
+ if v < CreatedResourcesVersion {
+ test(balance, "unavailable Asset "+strconv.Itoa(FirstTestID))
+ } else {
+ test(balance)
+ }
+ })
appcall := `
itxn_begin
@@ -2732,18 +2814,20 @@ func TestCreateAndUse(t *testing.T) {
int 1
`
- // Now as ForeignAsset
- ep, tx, ledger = MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- ledger.NewAccount(appAddr(888), 4*MakeTestProto().MinTxnFee)
- // It gets passed the Assets setting
- TestApp(t, appcall, ep, "attempt to self-call")
-
- // Appcall is isn't allowed pre-CreatedResourcesVersion, because same
- // version allowed inner app calls
- // ep.Proto = MakeTestProtoV(CreatedResourcesVersion - 1)
- // TestApp(t, appcall, ep, "invalid Asset reference")
+ // Now as ForeignAsset (starts in v6, when inner app calls allowed)
+ TestLogicRange(t, 6, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ test := func(source string, problems ...string) {
+ TestApp(t, source, ep, problems...)
+ }
+
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ ledger.NewAccount(appAddr(888), 4*MakeTestProto().MinTxnFee)
+ // It gets passed the Assets setting
+ test(appcall, "attempt to self-call")
+		// Appcall isn't allowed pre-v6, so there's no point in this loop
+ // checking v5.
+ })
}
// main wraps up some TEAL source in a header and footer so that it is
@@ -2757,42 +2841,33 @@ func main(source string) string {
end: int 1`, source)
}
-func hexProgram(t *testing.T, source string) string {
- return "0x" + hex.EncodeToString(TestProg(t, source, AssemblerMaxVersion).Program)
+func hexProgram(t *testing.T, source string, v uint64) string {
+ return "0x" + hex.EncodeToString(TestProg(t, source, v).Program)
}
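// A sketch of the typical call site (as used for ApprovalProgram below):
// embedding a version-matched program as a TEAL byte constant.
//
//	source := "byte " + hexProgram(t, "int 1", v) + "; itxn_field ApprovalProgram"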
-// TestCreateAndUseApp checks that an app can be created in an inner txn, and then
+// TestCreateAndSeeApp checks that an app can be created in an inner txn, and then
// the address for it can be looked up.
-func TestCreateUseApp(t *testing.T) {
+func TestCreateSeeApp(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- pay5back := main(`
-itxn_begin
-int pay; itxn_field TypeEnum
-txn Sender; itxn_field Receiver
-int 5; itxn_field Amount
-itxn_submit
-int 1
-`)
-
- createAndUse := `
+ TestLogicRange(t, CreatedResourcesVersion, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ ledger.NewAccount(appAddr(888), 1*MakeTestProto().MinTxnFee)
+ createAndUse := `
itxn_begin
int appl; itxn_field TypeEnum
- byte ` + hexProgram(t, pay5back) + `; dup; itxn_field ApprovalProgram; itxn_field ClearStateProgram;
+ byte ` + hexProgram(t, main(""), 5) + `; dup; itxn_field ApprovalProgram; itxn_field ClearStateProgram;
itxn_submit
itxn CreatedApplicationID; app_params_get AppAddress; assert
addr ` + appAddr(5000).String() + `
==
`
-
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- ledger.NewAccount(appAddr(888), 1*MakeTestProto().MinTxnFee)
- TestApp(t, createAndUse, ep)
- // Again, can't test if this (properly) fails in previous version, because
- // we can't even create apps this way in previous version.
+ TestApp(t, createAndUse, ep)
+		// Again, can't test if this (properly) fails in previous versions,
+		// because we can't even create apps this way in previous versions.
+ })
}
// TestCreateAndPay checks that an app can be created in an inner txn, and then
@@ -2802,7 +2877,9 @@ func TestCreateAndPay(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- pay5back := main(`
+ TestLogicRange(t, CreatedResourcesVersion, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ v := ep.Proto.LogicSigVersion
+ pay5back := main(`
itxn_begin
int pay; itxn_field TypeEnum
txn Sender; itxn_field Receiver
@@ -2811,10 +2888,10 @@ itxn_submit
int 1
`)
- createAndPay := `
+ createAndPay := `
itxn_begin
int appl; itxn_field TypeEnum
- ` + fmt.Sprintf("byte %s", hexProgram(t, pay5back)) + `
+ ` + fmt.Sprintf("byte %s", hexProgram(t, pay5back, v)) + `
dup
itxn_field ApprovalProgram;
itxn_field ClearStateProgram;
@@ -2829,15 +2906,15 @@ int 1
int 1
`
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
- ledger.NewAccount(appAddr(888), 10*MakeTestProto().MinTxnFee)
- TestApp(t, createAndPay, ep)
+ ledger.NewApp(tx.Receiver, 888, basics.AppParams{})
+ ledger.NewAccount(appAddr(888), 10*MakeTestProto().MinTxnFee)
+ TestApp(t, createAndPay, ep)
- // This test is impossible because CreatedResourcesVersion is also when
- // inner txns could make apps.
- // ep.Proto = MakeTestProtoV(CreatedResourcesVersion - 1)
- // TestApp(t, createAndPay, ep, "invalid Address reference")
+ // This test is impossible because CreatedResourcesVersion is also when
+ // inner txns could make apps.
+ // ep.Proto = MakeTestProtoV(CreatedResourcesVersion - 1)
+ // TestApp(t, createAndPay, ep, "invalid Address reference")
+ })
}
// TestInnerGaid ensures there's no confusion over the tracking of ids
@@ -3000,64 +3077,38 @@ done:
TestAppBytes(t, app.ApprovalProgram, ep, "appl depth")
}
-func TestInfiniteRecursion(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- ep, tx, ledger := MakeSampleEnv()
- source := `
-itxn_begin
-int appl; itxn_field TypeEnum
-int 0; app_params_get AppApprovalProgram
-assert
-itxn_field ApprovalProgram
-
-int 0; app_params_get AppClearStateProgram
-assert
-itxn_field ClearStateProgram
-
-itxn_submit
-`
- // This app looks itself up in the ledger, so we need to put it in there.
- ledger.NewApp(tx.Sender, 888, basics.AppParams{
- ApprovalProgram: TestProg(t, source, AssemblerMaxVersion).Program,
- ClearStateProgram: TestProg(t, "int 1", AssemblerMaxVersion).Program,
- })
- // We're testing if this can recur forever. It's hard to fund all these
- // apps, but we can put a huge credit in the ep.
- *ep.FeeCredit = 1_000_000_000
-
- // This has been tested by hand, by setting maxAppCallDepth to 10_000_000
- // but without that, the depth limiter stops it first.
- // TestApp(t, source, ep, "too many inner transactions 1 with 0 left")
-
- TestApp(t, source, ep, "appl depth (8) exceeded")
-}
-
+// TestForeignAppAccountAccess ensures that an app can access the account
+// associated with an app mentioned in its ForeignApps.
func TestForeignAppAccountAccess(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, tx, ledger := MakeSampleEnv()
- ledger.NewAccount(appAddr(888), 50_000)
- tx.ForeignApps = []basics.AppIndex{basics.AppIndex(111)}
+ TestLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ v := ep.Proto.LogicSigVersion
+ ledger.NewAccount(appAddr(888), 50_000)
+ tx.ForeignApps = []basics.AppIndex{basics.AppIndex(111)}
- ledger.NewApp(tx.Sender, 111, basics.AppParams{
- ApprovalProgram: TestProg(t, "int 1", AssemblerMaxVersion).Program,
- ClearStateProgram: TestProg(t, "int 1", AssemblerMaxVersion).Program,
- })
+ ledger.NewApp(tx.Sender, 111, basics.AppParams{
+ ApprovalProgram: TestProg(t, "int 1", AssemblerMaxVersion).Program,
+ ClearStateProgram: TestProg(t, "int 1", AssemblerMaxVersion).Program,
+ })
- TestApp(t, `
+		// the app's address is available starting in v7
+ var problem []string
+ if v < 7 {
+ problem = []string{"invalid Account reference " + appAddr(111).String()}
+ }
+
+ TestApp(t, `
itxn_begin
-int pay
-itxn_field TypeEnum
-int 100
-itxn_field Amount
+int pay; itxn_field TypeEnum
+int 100; itxn_field Amount
txn Applications 1
app_params_get AppAddress
assert
itxn_field Receiver
itxn_submit
int 1
-`, ep)
+`, ep, problem...)
+ })
}
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index f6ab14a07..3899cdaeb 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -70,6 +70,50 @@ func makeOldAndNewEnv(version uint64) (*EvalParams, *EvalParams, *Ledger) {
return old, new, sharedLedger
}
+func (r *resources) String() string {
+ sb := strings.Builder{}
+ if len(r.createdAsas) > 0 {
+ fmt.Fprintf(&sb, "createdAsas: %v\n", r.createdAsas)
+ }
+ if len(r.createdApps) > 0 {
+ fmt.Fprintf(&sb, "createdApps: %v\n", r.createdApps)
+ }
+
+ if len(r.sharedAccounts) > 0 {
+ fmt.Fprintf(&sb, "sharedAccts:\n")
+ for addr := range r.sharedAccounts {
+ fmt.Fprintf(&sb, " %s\n", addr)
+ }
+ }
+ if len(r.sharedAsas) > 0 {
+ fmt.Fprintf(&sb, "sharedAsas:\n")
+ for id := range r.sharedAsas {
+ fmt.Fprintf(&sb, " %d\n", id)
+ }
+ }
+ if len(r.sharedApps) > 0 {
+ fmt.Fprintf(&sb, "sharedApps:\n")
+ for id := range r.sharedApps {
+ fmt.Fprintf(&sb, " %d\n", id)
+ }
+ }
+
+ if len(r.sharedHoldings) > 0 {
+ fmt.Fprintf(&sb, "sharedHoldings:\n")
+ for hl := range r.sharedHoldings {
+ fmt.Fprintf(&sb, " %s x %d\n", hl.Address, hl.Asset)
+ }
+ }
+ if len(r.sharedLocals) > 0 {
+ fmt.Fprintf(&sb, "sharedLocals:\n")
+ for hl := range r.sharedLocals {
+ fmt.Fprintf(&sb, " %s x %d\n", hl.Address, hl.App)
+ }
+ }
+
+ return sb.String()
+}
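// With the Stringer above, a failing test can dump whatever sharing state has
// been tracked. A minimal sketch (dumpResources is hypothetical, not a helper
// defined in this file):
func dumpResources(t *testing.T, r *resources) {
	t.Helper()
	if r != nil {
		t.Logf("tracked resources:\n%s", r) // the %s verb uses (*resources).String
	}
}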
+
func TestEvalModes(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -78,8 +122,8 @@ func TestEvalModes(t *testing.T) {
// check modeAny (v1 + txna/gtxna) are available in RunModeSignature
// check all opcodes available in runModeApplication
- opcodesRunModeAny := `intcblock 0 1 1 1 1 5 100
- bytecblock 0x414c474f 0x1337 0x2001 0xdeadbeef 0x70077007
+ opcodesRunModeAny := `intcblock 0 1 1 1 1 500 100
+ bytecblock "ALGO" 0x1337 0x2001 0xdeadbeef 0x70077007
bytec 0
sha256
keccak256
@@ -151,24 +195,24 @@ arg 4
&&
`
- opcodesRunModeApplication := `int 0
+ opcodesRunModeApplication := `txn Sender
balance
&&
-int 0
+txn Sender
min_balance
&&
-intc_0
+txn Sender
intc 6 // 100
app_opted_in
&&
-intc_0
+txn Sender
bytec_0 // ALGO
intc_1
app_local_put
bytec_0
intc_1
app_global_put
-intc_0
+txn Sender
intc 6
bytec_0
app_local_get_ex
@@ -179,17 +223,17 @@ bytec_0
app_global_get_ex
pop
&&
-intc_0
+txn Sender
bytec_0
app_local_del
bytec_0
app_global_del
-intc_0
-intc 5 // 5
+txn Sender
+intc 5 // 500
asset_holding_get AssetBalance
pop
&&
-intc_0
+intc 5 // 500
asset_params_get AssetTotal
pop
&&
@@ -217,7 +261,7 @@ log
tx.Note,
}
ep.TxnGroup[0].Txn.ApplicationID = 100
- ep.TxnGroup[0].Txn.ForeignAssets = []basics.AssetIndex{5} // needed since v4
+ ep.TxnGroup[0].Txn.ForeignAssets = []basics.AssetIndex{500} // needed since v4
params := basics.AssetParams{
Total: 1000,
Decimals: 2,
@@ -280,27 +324,43 @@ log
}
// check stateful opcodes are not allowed in stateless mode
- statefulOpcodeCalls := []string{
- "int 0\nbalance",
- "int 0\nmin_balance",
- "int 0\nint 0\napp_opted_in",
- "int 0\nint 0\nbyte 0x01\napp_local_get_ex",
- "byte 0x01\napp_global_get",
- "int 0\nbyte 0x01\napp_global_get_ex",
- "int 1\nbyte 0x01\nbyte 0x01\napp_local_put",
- "byte 0x01\nint 0\napp_global_put",
- "int 0\nbyte 0x01\napp_local_del",
- "byte 0x01\napp_global_del",
- "int 0\nint 0\nasset_holding_get AssetFrozen",
- "int 0\nint 0\nasset_params_get AssetManager",
- "int 0\nint 0\napp_params_get AppApprovalProgram",
- "byte 0x01\nlog",
- }
-
- for _, source := range statefulOpcodeCalls {
- source := source
- testLogic(t, source, AssemblerMaxVersion, defaultEvalParams(),
- "not allowed in current mode", "not allowed in current mode")
+ for v := uint64(2); v <= AssemblerMaxVersion; v++ {
+ sender := "txn Sender;"
+ if v < directRefEnabledVersion {
+ sender = "int 0;"
+ }
+ statefulOpcodeCalls := map[string]uint64{
+ sender + "balance": 2,
+ sender + "min_balance": 3,
+ sender + "int 0; app_opted_in": 2,
+ sender + "int 0; byte 0x01; app_local_get_ex": 2,
+ "byte 0x01; app_global_get": 2,
+ "int 0; byte 0x01; app_global_get_ex": 2,
+ sender + "byte 0x01; byte 0x01; app_local_put": 2,
+ "byte 0x01; int 0; app_global_put": 2,
+ sender + "byte 0x01; app_local_del": 2,
+ "byte 0x01; app_global_del": 2,
+ sender + "int 0; asset_holding_get AssetFrozen": 2,
+ "int 0; int 0; asset_params_get AssetManager": 2,
+ "int 0; int 0; app_params_get AppApprovalProgram": 5,
+ "byte 0x01; log": 5,
+ sender + "acct_params_get AcctBalance": 7,
+
+ "byte 0x1234; int 12; box_create": 8,
+ "byte 0x1234; int 12; int 4; box_extract": 8,
+ "byte 0x1234; int 12; byte 0x24; box_replace": 8,
+ "byte 0x1234; box_del": 8,
+ "byte 0x1234; box_len": 8,
+ "byte 0x1234; box_get": 8,
+ "byte 0x1234; byte 0x12; box_put": 8,
+ }
+ for source, introduced := range statefulOpcodeCalls {
+ if v < introduced {
+ continue
+ }
+ testLogic(t, source, v, defaultEvalParamsWithVersion(v),
+ "not allowed in current mode", "not allowed in current mode")
+ }
}
require.Equal(t, RunMode(1), ModeSig)
@@ -311,35 +371,35 @@ log
func TestBalance(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
- ep, tx, ledger := makeSampleEnv()
- text := "int 2; balance; int 177; =="
- ledger.NewAccount(tx.Receiver, 177)
- testApp(t, text, ep, "invalid Account reference")
-
- text = `int 1; balance; int 177; ==`
- testApp(t, text, ep)
-
- text = `txn Accounts 1; balance; int 177; ==;`
- // won't assemble in old version teal
- testProg(t, text, directRefEnabledVersion-1, Expect{1, "balance arg 0 wanted type uint64..."})
- // but legal after that
- testApp(t, text, ep)
-
- text = "int 0; balance; int 13; ==; assert; int 1"
- var addr basics.Address
- copy(addr[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui02"))
- ledger.NewAccount(addr, 13)
- testApp(t, text, ep, "assert failed")
-
- ledger.NewAccount(tx.Sender, 13)
- testApp(t, text, ep)
+ testLogicRange(t, 2, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ v := ep.Proto.LogicSigVersion
+ ledger.NewAccount(tx.Receiver, 177)
+ testApp(t, "int 2; balance; int 177; ==", ep, "invalid Account reference")
+ testApp(t, `int 1; balance; int 177; ==`, ep)
+
+ source := `txn Accounts 1; balance; int 177; ==;`
+		// won't assemble in older versions of teal
+ if v < directRefEnabledVersion {
+ testProg(t, source, ep.Proto.LogicSigVersion,
+ exp(1, "balance arg 0 wanted type uint64..."))
+ return
+ }
+
+ // but legal after that
+ testApp(t, source, ep)
+
+ source = "txn Sender; balance; int 13; ==; assert; int 1"
+ testApp(t, source, ep, "assert failed")
+
+ ledger.NewAccount(tx.Sender, 13)
+ testApp(t, source, ep)
+ })
}
func testApps(t *testing.T, programs []string, txgroup []transactions.SignedTxn, version uint64, ledger *Ledger,
- expected ...Expect) {
+ expected ...expect) *EvalParams {
t.Helper()
codes := make([][]byte, len(programs))
for i, program := range programs {
@@ -364,22 +424,27 @@ func testApps(t *testing.T, programs []string, txgroup []transactions.SignedTxn,
ep.Ledger = ledger
ep.SigLedger = ledger
testAppsBytes(t, codes, ep, expected...)
+ return ep
}
-func testAppsBytes(t *testing.T, programs [][]byte, ep *EvalParams, expected ...Expect) {
+func testAppsBytes(t *testing.T, programs [][]byte, ep *EvalParams, expected ...expect) {
t.Helper()
- require.Equal(t, len(programs), len(ep.TxnGroup))
+ require.LessOrEqual(t, len(programs), len(ep.TxnGroup))
for i := range ep.TxnGroup {
- if programs[i] != nil {
+ program := ep.TxnGroup[i].Txn.ApprovalProgram
+ if len(programs) > i && programs[i] != nil {
+ program = programs[i]
+ }
+ if program != nil {
appID := ep.TxnGroup[i].Txn.ApplicationID
if appID == 0 {
appID = basics.AppIndex(888)
}
if len(expected) > 0 && expected[0].l == i {
- testAppFull(t, programs[i], i, appID, ep, expected[0].s)
+ testAppFull(t, program, i, appID, ep, expected[0].s)
break // Stop after first failure
} else {
- testAppFull(t, programs[i], i, appID, ep)
+ testAppFull(t, program, i, appID, ep)
}
}
}
@@ -461,45 +526,70 @@ func testAppFull(t *testing.T, program []byte, gi int, aid basics.AppIndex, ep *
return delta
}
+// testLogicRange allows for running tests against a range of avm
+// versions. Generally `start` will be the version that introduced the feature,
+// and `stop` will be 0 to indicate it should work right on up through the
+// current version. `stop` will be an actual version number if we're confirming
+// that something STOPS working as of a particular version. Note that this does
+// *not* use different consensus versions. It is tempting to make it find the
+// lowest possible consensus version in the loop in order to support the `v` it
+// it working on. For super confidence, one might argue this should be a nested
+// loop over all of the consensus versions that work with the `v`, from the
+// first possible, to vFuture.
+func testLogicRange(t *testing.T, start, stop int, test func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger)) {
+ t.Helper()
+ if stop == 0 { // Treat 0 as current max
+ stop = LogicVersion
+ }
+
+ for v := uint64(start); v <= uint64(stop); v++ {
+ t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
+ ep, tx, ledger := makeSampleEnvWithVersion(v)
+ test(t, ep, tx, ledger)
+ })
+ }
+}
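// A minimal sketch of the helper at a call site (TestSomeV5Feature is
// hypothetical; the shape matches the real uses throughout this file):
func TestSomeV5Feature(t *testing.T) {
	partitiontest.PartitionTest(t)
	t.Parallel()
	// introduced in v5, expected to keep working through LogicVersion
	testLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
		testApp(t, "int 1", ep)
	})
}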
+
func TestMinBalance(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
- ep, tx, ledger := makeSampleEnv()
-
- ledger.NewAccount(tx.Sender, 234)
- ledger.NewAccount(tx.Receiver, 123)
+ // since v3 is before directRefEnabledVersion, do a quick test on it separately
+ ep, tx, ledger := makeSampleEnvWithVersion(3)
+ ledger.NewAccount(tx.Sender, 100)
testApp(t, "int 0; min_balance; int 1001; ==", ep)
// Sender makes an asset, min balance goes up
ledger.NewAsset(tx.Sender, 7, basics.AssetParams{Total: 1000})
testApp(t, "int 0; min_balance; int 2002; ==", ep)
- schemas := makeApp(1, 2, 3, 4)
- ledger.NewApp(tx.Sender, 77, schemas)
- ledger.NewLocals(tx.Sender, 77)
- // create + optin + 10 schema base + 4 ints + 6 bytes (local
- // and global count b/c NewLocals opts the creator in)
- minb := 1002 + 1006 + 10*1003 + 4*1004 + 6*1005
- testApp(t, fmt.Sprintf("int 0; min_balance; int %d; ==", 2002+minb), ep)
- // request extra program pages, min balance increase
- withepp := makeApp(1, 2, 3, 4)
- withepp.ExtraProgramPages = 2
- ledger.NewApp(tx.Sender, 77, withepp)
- minb += 2 * 1002
- testApp(t, fmt.Sprintf("int 0; min_balance; int %d; ==", 2002+minb), ep)
-
- testApp(t, "int 1; min_balance; int 1001; ==", ep) // 1 == Accounts[0]
- testProg(t, "txn Accounts 1; min_balance; int 1001; ==", directRefEnabledVersion-1,
- Expect{1, "min_balance arg 0 wanted type uint64..."})
- testProg(t, "txn Accounts 1; min_balance; int 1001; ==", directRefEnabledVersion)
- testApp(t, "txn Accounts 1; min_balance; int 1001; ==", ep) // 1 == Accounts[0]
- // Receiver opts in
- ledger.NewHolding(tx.Receiver, 7, 1, true)
- testApp(t, "int 1; min_balance; int 2002; ==", ep) // 1 == Accounts[0]
-
- testApp(t, "int 2; min_balance; int 1001; ==", ep, "invalid Account reference 2")
+ // now test in more detail v4 and on
+ testLogicRange(t, 4, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ ledger.NewAccount(tx.Sender, 234)
+ ledger.NewAccount(tx.Receiver, 123)
+ testApp(t, "txn Sender; min_balance; int 1001; ==", ep)
+ // Sender makes an asset, min balance goes up
+ ledger.NewAsset(tx.Sender, 7, basics.AssetParams{Total: 1000})
+ testApp(t, "txn Sender; min_balance; int 2002; ==", ep)
+ schemas := makeApp(1, 2, 3, 4)
+ ledger.NewApp(tx.Sender, 77, schemas)
+ ledger.NewLocals(tx.Sender, 77)
+ // create + optin + 10 schema base + 4 ints + 6 bytes (local
+ // and global count b/c NewLocals opts the creator in)
+ minb := 1002 + 1006 + 10*1003 + 4*1004 + 6*1005
+ testApp(t, fmt.Sprintf("txn Sender; min_balance; int %d; ==", 2002+minb), ep)
+ // request extra program pages, min balance increase
+ withepp := makeApp(1, 2, 3, 4)
+ withepp.ExtraProgramPages = 2
+ ledger.NewApp(tx.Sender, 77, withepp)
+ minb += 2 * 1002
+ testApp(t, fmt.Sprintf("txn Sender; min_balance; int %d; ==", 2002+minb), ep)
+
+ testApp(t, "txn Accounts 1; min_balance; int 1001; ==", ep)
+ // Receiver opts in
+ ledger.NewHolding(tx.Receiver, 7, 1, true)
+ testApp(t, "txn Receiver; min_balance; int 2002; ==", ep)
+ })
}
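// Worked out, the distinguishable per-item costs in the test protocol make
// the assertions above checkable by hand (arithmetic sketch only):
//
//	minb = 1002 + 1006 + 10*1003 + 4*1004 + 6*1005 // = 22_084
//	2002 + minb                                    // = 24_086, first assertion
//	2002 + minb + 2*1002                           // = 26_090, with extra pages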
func TestAppCheckOptedIn(t *testing.T) {
@@ -533,7 +623,7 @@ func TestAppCheckOptedIn(t *testing.T) {
testApp(t, "int 1; int 2; app_opted_in; int 0; ==", pre) // in pre, int 2 is an actual app id
testApp(t, "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui01\"; int 2; app_opted_in; int 1; ==", now)
testProg(t, "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui01\"; int 2; app_opted_in; int 1; ==", directRefEnabledVersion-1,
- Expect{1, "app_opted_in arg 0 wanted type uint64..."})
+ exp(1, "app_opted_in arg 0 wanted type uint64..."))
// Receiver opts into 888, the current app in testApp
ledger.NewLocals(txn.Txn.Receiver, 888)
@@ -601,14 +691,14 @@ app_local_get_ex
bnz exist
err
exist:
-byte 0x414c474f
+byte "ALGO"
==`
ledger.NewLocal(now.TxnGroup[0].Txn.Receiver, 100, string(protocol.PaymentTx), basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"})
testApp(t, text, now)
testApp(t, strings.Replace(text, "int 1 // account idx", "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui01\"", -1), now)
testProg(t, strings.Replace(text, "int 1 // account idx", "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui01\"", -1), directRefEnabledVersion-1,
- Expect{4, "app_local_get_ex arg 0 wanted type uint64..."})
+ exp(4, "app_local_get_ex arg 0 wanted type uint64..."))
testApp(t, strings.Replace(text, "int 100 // app id", "int 2", -1), now)
// Next we're testing if the use of the current app's id works
// as a direct reference. The error is because the receiver
@@ -616,9 +706,9 @@ byte 0x414c474f
now.TxnGroup[0].Txn.ApplicationID = 123
testApp(t, strings.Replace(text, "int 100 // app id", "int 123", -1), now, "is not opted into")
testApp(t, strings.Replace(text, "int 100 // app id", "int 2", -1), pre, "is not opted into")
- testApp(t, strings.Replace(text, "int 100 // app id", "int 9", -1), now, "invalid App reference 9")
+ testApp(t, strings.Replace(text, "int 100 // app id", "int 9", -1), now, "unavailable App 9")
testApp(t, strings.Replace(text, "int 1 // account idx", "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00\"", -1), now,
- "no such address")
+ "no account")
// opt into 123, and try again
ledger.NewApp(now.TxnGroup[0].Txn.Receiver, 123, basics.AppParams{})
@@ -643,7 +733,7 @@ app_local_get_ex
bnz exist
err
exist:
-byte 0x414c474f
+byte "ALGO"
==`
ledger.NewLocal(now.TxnGroup[0].Txn.Sender, 100, string(protocol.PaymentTx), basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"})
@@ -662,7 +752,7 @@ app_local_get_ex
bnz exist
err
exist:
-byte 0x414c474f
+byte "ALGO"
==`
ledger.NewLocals(now.TxnGroup[0].Txn.Sender, 56)
@@ -673,7 +763,7 @@ byte 0x414c474f
text = `int 0 // account idx
txn ApplicationArgs 0
app_local_get
-byte 0x414c474f
+byte "ALGO"
==`
ledger.NewLocal(now.TxnGroup[0].Txn.Sender, 100, string(protocol.PaymentTx), basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"})
@@ -681,14 +771,14 @@ byte 0x414c474f
testApp(t, text, now)
testApp(t, strings.Replace(text, "int 0 // account idx", "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00\"", -1), now)
testProg(t, strings.Replace(text, "int 0 // account idx", "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00\"", -1), directRefEnabledVersion-1,
- Expect{3, "app_local_get arg 0 wanted type uint64..."})
+ exp(3, "app_local_get arg 0 wanted type uint64..."))
testApp(t, strings.Replace(text, "int 0 // account idx", "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui01\"", -1), now)
testApp(t, strings.Replace(text, "int 0 // account idx", "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui02\"", -1), now,
"invalid Account reference")
// check app_local_get default value
text = `int 0 // account idx
-byte 0x414c474f
+byte "ALGO"
app_local_get
int 0
==`
@@ -707,7 +797,7 @@ app_global_get_ex
bnz exist
err
exist:
-byte 0x414c474f
+byte "ALGO"
==
int 1 // ForeignApps index
txn ApplicationArgs 0
@@ -715,12 +805,12 @@ app_global_get_ex
bnz exist1
err
exist1:
-byte 0x414c474f
+byte "ALGO"
==
&&
txn ApplicationArgs 0
app_global_get
-byte 0x414c474f
+byte "ALGO"
==
&&
`
@@ -729,7 +819,7 @@ byte 0x414c474f
now.TxnGroup[0].Txn.ApplicationID = 100
now.TxnGroup[0].Txn.ForeignApps = []basics.AppIndex{now.TxnGroup[0].Txn.ApplicationID}
- testApp(t, text, now, "no such app")
+ testApp(t, text, now, "no app 100")
// create the app and check the value from ApplicationArgs[0] (protocol.PaymentTx) does not exist
ledger.NewApp(now.TxnGroup[0].Txn.Sender, 100, basics.AppParams{})
@@ -742,11 +832,11 @@ byte 0x414c474f
// check error on invalid app index for app_global_get_ex
text = "int 2; txn ApplicationArgs 0; app_global_get_ex"
- testApp(t, text, now, "invalid App reference 2")
+ testApp(t, text, now, "unavailable App 2")
// check that actual app id ok instead of indirect reference
- text = "int 100; txn ApplicationArgs 0; app_global_get_ex; int 1; ==; assert; byte 0x414c474f; =="
+ text = `int 100; txn ApplicationArgs 0; app_global_get_ex; int 1; ==; assert; byte "ALGO"; ==`
testApp(t, text, now)
- testApp(t, text, pre, "invalid App reference 100") // but not in old teal
+ testApp(t, text, pre, "App index 100 beyond") // but not in old teal
// check app_global_get default value
text = "byte 0x414c474f55; app_global_get; int 0; =="
@@ -824,7 +914,7 @@ int 0//params
asset_params_get AssetUnitName
!
bnz error
-byte 0x414c474f
+byte "ALGO"
==
&&
int 0//params
@@ -939,11 +1029,11 @@ func testAssetsByVersion(t *testing.T, assetsTestProgram string, version uint64)
// was legal to get balance on a non-ForeignAsset
testApp(t, "int 0; int 54; asset_holding_get AssetBalance; ==", pre)
// but not since directRefEnabledVersion
- testApp(t, "int 0; int 54; asset_holding_get AssetBalance", now, "invalid Asset reference 54")
+ testApp(t, "int 0; int 54; asset_holding_get AssetBalance", now, "unavailable Asset 54")
// it wasn't legal to use a direct ref for account
testProg(t, `byte "aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"; int 54; asset_holding_get AssetBalance`,
- directRefEnabledVersion-1, Expect{1, "asset_holding_get AssetBalance arg 0 wanted type uint64..."})
+ directRefEnabledVersion-1, exp(1, "asset_holding_get AssetBalance arg 0 wanted type uint64..."))
// but it is now (empty asset yields 0,0 on stack)
testApp(t, `byte "aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"; int 55; asset_holding_get AssetBalance; ==`, now)
// This is receiver, who is in Assets array
@@ -952,8 +1042,8 @@ func testAssetsByVersion(t *testing.T, assetsTestProgram string, version uint64)
testApp(t, `byte "aoeuiaoeuiaoeuiaoeuiaoeuiaoeui02"; int 55; asset_holding_get AssetBalance; ==`, now, "invalid")
	// for params get, presence in ForeignAssets has always been required
- testApp(t, "int 5; asset_params_get AssetTotal", pre, "invalid Asset reference 5")
- testApp(t, "int 5; asset_params_get AssetTotal", now, "invalid Asset reference 5")
+ testApp(t, "int 5; asset_params_get AssetTotal", pre, "Asset index 5 beyond")
+ testApp(t, "int 5; asset_params_get AssetTotal", now, "unavailable Asset 5")
params := basics.AssetParams{
Total: 1000,
@@ -986,11 +1076,11 @@ func testAssetsByVersion(t *testing.T, assetsTestProgram string, version uint64)
testApp(t, strings.Replace(assetsTestProgram, "int 55", "int 0", -1), now)
// but old code cannot
- testProg(t, strings.Replace(assetsTestProgram, "int 0//account", "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00\"", -1), directRefEnabledVersion-1, Expect{3, "asset_holding_get AssetBalance arg 0 wanted type uint64..."})
+ testProg(t, strings.Replace(assetsTestProgram, "int 0//account", "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00\"", -1), directRefEnabledVersion-1, exp(3, "asset_holding_get AssetBalance arg 0 wanted type uint64..."))
if version < 5 {
// Can't run these with AppCreator anyway
- testApp(t, strings.Replace(assetsTestProgram, "int 0//params", "int 55", -1), pre, "invalid Asset ref")
+ testApp(t, strings.Replace(assetsTestProgram, "int 0//params", "int 55", -1), pre, "Asset index 55 beyond")
testApp(t, strings.Replace(assetsTestProgram, "int 55", "int 0", -1), pre, "err opcode")
}
@@ -1100,130 +1190,275 @@ intc_1
testApp(t, notrack(source), now, "cannot compare ([]byte to uint64)")
}
-func TestAppParams(t *testing.T) {
+// TestAssetDisambiguation ensures we have a consistent interpretation of low
+// numbers when used as an argument to asset_*_get. A low number is an asset ID
+// if that asset ID is available, or a slot number in txn.Assets if not.
+func TestAssetDisambiguation(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, tx, ledger := makeSampleEnv()
- ledger.NewAccount(tx.Sender, 1)
- ledger.NewApp(tx.Sender, 100, basics.AppParams{})
- /* app id is in ForeignApps, but does not exist */
- source := "int 56; app_params_get AppExtraProgramPages; int 0; ==; assert; int 0; =="
- testApp(t, source, ep)
- /* app id is in ForeignApps, but has zero ExtraProgramPages */
- source = "int 100; app_params_get AppExtraProgramPages; int 1; ==; assert; int 0; =="
- testApp(t, source, ep)
+ // It would be nice to start at 2, when apps were added, but `assert` is
+ // very convenient for testing, and nothing important changed from 2 to
+	// 3. (But directRefEnabledVersion=4, so that change is a big deal.)
+ testLogicRange(t, 3, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ ledger.NewAsset(tx.Sender, 1, basics.AssetParams{AssetName: "one", Total: 1})
+ ledger.NewAsset(tx.Sender, 255, basics.AssetParams{AssetName: "twenty", Total: 255})
+ ledger.NewAsset(tx.Sender, 256, basics.AssetParams{AssetName: "thirty", Total: 256})
+ tx.ForeignAssets = []basics.AssetIndex{255, 256}
+ // Since 1 is not available, 1 must mean the 1th asset slot = 256
+ testApp(t, `int 1; asset_params_get AssetName; assert; byte "thirty"; ==`, ep)
+
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ // in v3, the asset argument is always treated as an ID, so this is asset 1
+ testApp(t, `int 0; int 1; asset_holding_get AssetBalance; assert; int 1; ==`, ep)
+ } else {
+ testApp(t, `int 0; int 1; asset_holding_get AssetBalance; assert; int 256; ==`, ep)
+ }
+
+ tx.ForeignAssets = []basics.AssetIndex{1, 256}
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+			// There's no direct use of asset IDs, so 1 is still the 1th slot (256)
+ testApp(t, `int 1; asset_params_get AssetName; assert; byte "thirty"; ==`, ep)
+ } else {
+ // Since 1 IS available, 1 means the assetid=1, not the 1th slot
+ testApp(t, `int 1; asset_params_get AssetName; assert; byte "one"; ==`, ep)
+ }
+ testApp(t, `int 0; int 1; asset_holding_get AssetBalance; assert; int 1; ==`, ep)
+
+ ep.Proto.AppForbidLowResources = true
+ tx.ForeignAssets = []basics.AssetIndex{255, 256}
+ // Since 1 is not available, 1 must mean the 1th asset slot = 256
+ testApp(t, `int 1; asset_params_get AssetName; assert; byte "thirty"; ==`, ep)
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ // in v3, the asset argument is always treated as an ID, so this is asset 1
+ testApp(t, `int 0; int 1; asset_holding_get AssetBalance; assert; int 256; ==`, ep,
+ "low Asset lookup 1")
+ } else {
+ testApp(t, `int 0; int 1; asset_holding_get AssetBalance; assert; int 256; ==`, ep)
+ }
+
+ // but now if that resolution led to a number below 255, boom
+ tx.ForeignAssets = []basics.AssetIndex{256, 255}
+ testApp(t, `int 1; asset_params_get AssetName; assert; byte "thirty"; ==`, ep,
+ "low Asset lookup 255")
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ // in v3, the asset argument is always treated as an ID, so this is asset 1
+ testApp(t, `int 0; int 1; asset_holding_get AssetBalance; assert; int 30; ==`, ep,
+ "low Asset lookup 1")
+ } else {
+ testApp(t, `int 0; int 1; asset_holding_get AssetBalance; assert; int 30; ==`, ep,
+ "low Asset lookup 255")
+ }
+
+ tx.ForeignAssets = []basics.AssetIndex{1, 256}
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ // in v3, the asset argument is always a slot, so this is asset 256
+ testApp(t, `int 1; asset_params_get AssetName; assert; byte "thirty"; ==`, ep)
+ } else {
+ // Since 1 IS available, 1 means the assetid=1, not the 1th slot
+ testApp(t, `int 1; asset_params_get AssetName; assert; byte "one"; ==`, ep,
+ "low Asset lookup 1")
+ }
+ // pre v4 and the availability rule come to the same conclusion: treat the 1 as an ID
+ testApp(t, `int 0; int 1; asset_holding_get AssetBalance; assert; int 1; ==`, ep,
+ "low Asset lookup 1")
+ })
}
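// A sketch of the resolution rule these cases exercise (resolveAsset is
// hypothetical, and elides the AppForbidLowResources check, which further
// rejects any ID at or below 255 that is reached this way):
func resolveAsset(arg uint64, available func(basics.AssetIndex) bool, foreign []basics.AssetIndex) (basics.AssetIndex, bool) {
	if available(basics.AssetIndex(arg)) {
		return basics.AssetIndex(arg), true // an available asset ID wins
	}
	if arg < uint64(len(foreign)) {
		return foreign[arg], true // otherwise, treat arg as a slot in txn.Assets
	}
	return 0, false // "unavailable Asset"
}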
-func TestAcctParams(t *testing.T) {
+// TestAppDisambiguation ensures we have a consistent interpretation of low
+// numbers when used as an argument to app_(global,local)_get. A low number is
+// an app ID if that app ID is available, or a slot number in
+// txn.ForeignApplications if not.
+func TestAppDisambiguation(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, tx, ledger := makeSampleEnv()
- source := "int 0; acct_params_get AcctBalance; !; assert; int 0; =="
- testApp(t, source, ep)
+ // It would be nice to start at 2, when apps were added, but `assert` is
+ // very convenient for testing, and nothing important changed from 2 to
+ // 3. (But directRefEnabledVersion=4, so that change is a big deal.)
+ testLogicRange(t, 3, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ // make apps with identifiable properties, so we can tell what we get
+ makeIdentifiableApp := func(appID uint64) {
+ ledger.NewApp(tx.Sender, basics.AppIndex(appID), basics.AppParams{
+ GlobalState: map[string]basics.TealValue{"a": {
+ Type: basics.TealUintType,
+ Uint: appID,
+ }},
+ ExtraProgramPages: uint32(appID),
+ })
+ ledger.NewLocals(tx.Sender, appID)
+ ledger.NewLocal(tx.Sender, appID, "x", basics.TealValue{Type: basics.TealUintType, Uint: appID * 10})
+ }
+ makeIdentifiableApp(1)
+ makeIdentifiableApp(20)
+ makeIdentifiableApp(256)
+
+ tx.ForeignApps = []basics.AppIndex{20, 256}
+	// Since app 1 is not available, 1 must mean app slot 1 = 20 (recall, 0 means "this app")
+ if ep.Proto.LogicSigVersion >= 5 { // to get AppExtraProgramPages
+ testApp(t, `int 1; app_params_get AppExtraProgramPages; assert; int 20; ==`, ep)
+ }
+ testApp(t, `int 1; byte "a"; app_global_get_ex; assert; int 20; ==`, ep)
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ // in v3, the app argument is always treated as an ID.
+ testApp(t, `int 0; int 1; byte "x"; app_local_get_ex; assert; int 10; ==`, ep)
+ } else {
+ testApp(t, `int 0; int 1; byte "x"; app_local_get_ex; assert; int 200; ==`, ep)
+ }
- source = "int 0; acct_params_get AcctMinBalance; !; assert; int 1001; =="
- testApp(t, source, ep)
+	// Make app 1 available, so now 1 means app ID 1, not slot 1
+ tx.ForeignApps = []basics.AppIndex{1, 256}
+ if ep.Proto.LogicSigVersion >= 5 { // to get AppExtraProgramPages
+ testApp(t, `int 1; app_params_get AppExtraProgramPages; assert; int 1; ==`, ep)
+ }
+ testApp(t, `int 1; byte "a"; app_global_get_ex; assert; int 1; ==`, ep)
+ testApp(t, `int 0; int 1; byte "x"; app_local_get_ex; assert; int 10; ==`, ep)
+
+	// same tests, but once AppForbidLowResources is set, using a low ID like 1 is forbidden
+ ep.Proto.AppForbidLowResources = true
+
+	// repeat the first tests; the slots resolve to 20 and 256, and only 20 is too low
+ tx.ForeignApps = []basics.AppIndex{20, 256}
+ if ep.Proto.LogicSigVersion >= 5 { // to get AppExtraProgramPages
+ testApp(t, `int 1; app_params_get AppExtraProgramPages; assert; int 20; ==`, ep,
+ "low App lookup 20")
+ testApp(t, `int 2; app_params_get AppExtraProgramPages; assert; int 256; ==`, ep)
+ }
+ testApp(t, `int 1; byte "a"; app_global_get_ex; assert; int 20; ==`, ep,
+ "low App lookup 20")
+ testApp(t, `int 2; byte "a"; app_global_get_ex; assert; int 256; ==`, ep)
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ // in v3, the app argument is always treated as an ID.
+ testApp(t, `int 0; int 1; byte "x"; app_local_get_ex; assert; int 200; ==`, ep,
+ "low App lookup 1")
+ testApp(t, `int 0; int 2; byte "x"; app_local_get_ex; assert; int 2560; ==`, ep,
+ "low App lookup 2")
+ } else {
+ testApp(t, `int 0; int 1; byte "x"; app_local_get_ex; assert; int 200; ==`, ep,
+ "low App lookup 20")
+ testApp(t, `int 0; int 2; byte "x"; app_local_get_ex; assert; int 2560; ==`, ep)
+ }
- ledger.NewAccount(tx.Sender, 42)
+	// repeat the second tests, which use app ID 1, which is too low
+ tx.ForeignApps = []basics.AppIndex{1, 256}
+ if ep.Proto.LogicSigVersion >= 5 { // to get AppExtraProgramPages
+ testApp(t, `int 1; app_params_get AppExtraProgramPages; assert; int 1; ==`, ep,
+ "low App lookup 1")
+ }
+ testApp(t, `int 1; byte "a"; app_global_get_ex; assert; int 1; ==`, ep,
+ "low App lookup 1")
+ testApp(t, `int 0; int 1; byte "x"; app_local_get_ex; assert; int 10; ==`, ep,
+ "low App lookup 1")
+ })
+}
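+
+// resolveLowApp is the app-side counterpart of the sketch above (again an
+// editor's illustration, not part of this change): app slot 0 is the current
+// app, and slots 1..N map to txn.ForeignApps[0..N-1], in contrast to the
+// 0-based asset slots. The AppForbidLowResources check then applies to the
+// resolved ID just as it does for assets.
+func resolveLowApp(arg uint64, available bool, current basics.AppIndex, foreign []basics.AppIndex) basics.AppIndex {
+	if arg == 0 {
+		return current // 0 is always an alias for the executing app
+	}
+	if available {
+		return basics.AppIndex(arg) // direct ID reference (v4 and later)
+	}
+	if arg <= uint64(len(foreign)) {
+		return foreign[arg-1] // app slots are 1-based
+	}
+	return basics.AppIndex(arg) // out of range; evaluation will reject it
+}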
- source = "int 0; acct_params_get AcctBalance; assert; int 42; =="
- testApp(t, source, ep)
+func TestAppParams(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ // start at 5 for app_params_get
+ testLogicRange(t, 5, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ ledger.NewAccount(tx.Sender, 1)
+ ledger.NewApp(tx.Sender, 100, basics.AppParams{})
+
+		// this app id (56) is in ForeignApps, but no such app exists
+ source := "int 56; app_params_get AppExtraProgramPages; int 0; ==; assert; int 0; =="
+ testApp(t, source, ep)
+		// app id 100 is in ForeignApps and exists, but has zero ExtraProgramPages
+ source = "int 100; app_params_get AppExtraProgramPages; int 1; ==; assert; int 0; =="
+ testApp(t, source, ep)
+ })
+}
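+
+// Both assertions above lean on the *_get convention that a (value,
+// didExist) pair is pushed, flag on top. A minimal TEAL illustration
+// (editor's sketch, not an added test):
+//
+//	int 100
+//	app_params_get AppExtraProgramPages
+//	assert // didExist is 1: app 100 was created above
+//	!      // value is 0: no extra pages were requested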
- source = "int 0; acct_params_get AcctMinBalance; assert; int 1001; =="
- testApp(t, source, ep)
+func TestAcctParams(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
- source = "int 0; acct_params_get AcctAuthAddr; assert; global ZeroAddress; =="
- testApp(t, source, ep)
+ // start at 6 for acct_params_get
+ testLogicRange(t, 6, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ test := func(source string) {
+ t.Helper()
+ testApp(t, source, ep)
+ }
- // No apps or schema at first, then 1 created and the global schema noted
- source = "int 0; acct_params_get AcctTotalAppsCreated; assert; !"
- testApp(t, source, ep)
- source = "int 0; acct_params_get AcctTotalNumUint; assert; !"
- testApp(t, source, ep)
- source = "int 0; acct_params_get AcctTotalNumByteSlice; assert; !"
- testApp(t, source, ep)
- source = "int 0; acct_params_get AcctTotalExtraAppPages; assert; !"
- testApp(t, source, ep)
- ledger.NewApp(tx.Sender, 2000, basics.AppParams{
- StateSchemas: basics.StateSchemas{
- LocalStateSchema: basics.StateSchema{
- NumUint: 6,
- NumByteSlice: 7,
- },
- GlobalStateSchema: basics.StateSchema{
- NumUint: 8,
- NumByteSlice: 9,
- },
- },
- ExtraProgramPages: 2,
- })
- source = "int 0; acct_params_get AcctTotalAppsCreated; assert; int 1; =="
- testApp(t, source, ep)
- source = "int 0; acct_params_get AcctTotalNumUint; assert; int 8; =="
- testApp(t, source, ep)
- source = "int 0; acct_params_get AcctTotalNumByteSlice; assert; int 9; =="
- testApp(t, source, ep)
- source = "int 0; acct_params_get AcctTotalExtraAppPages; assert; int 2; =="
- testApp(t, source, ep)
+ test("txn Sender; acct_params_get AcctBalance; !; assert; int 0; ==")
+ test("txn Sender; acct_params_get AcctMinBalance; !; assert; int 1001; ==")
- // Not opted in at first, then opted into 1, schema added
- source = "int 0; acct_params_get AcctTotalAppsOptedIn; assert; !"
- testApp(t, source, ep)
- ledger.NewLocals(tx.Sender, 2000)
- source = "int 0; acct_params_get AcctTotalAppsOptedIn; assert; int 1; =="
- testApp(t, source, ep)
- source = "int 0; acct_params_get AcctTotalNumUint; assert; int 8; int 6; +; =="
- testApp(t, source, ep)
- source = "int 0; acct_params_get AcctTotalNumByteSlice; assert; int 9; int 7; +; =="
- testApp(t, source, ep)
+ ledger.NewAccount(tx.Sender, 42)
- // No ASAs at first, then 1 created AND in total
- source = "int 0; acct_params_get AcctTotalAssetsCreated; assert; !"
- testApp(t, source, ep)
- source = "int 0; acct_params_get AcctTotalAssets; assert; !"
- testApp(t, source, ep)
- ledger.NewAsset(tx.Sender, 3000, basics.AssetParams{})
- source = "int 0; acct_params_get AcctTotalAssetsCreated; assert; int 1; =="
- testApp(t, source, ep)
- source = "int 0; acct_params_get AcctTotalAssets; assert; int 1; =="
- testApp(t, source, ep)
+ test("txn Sender; acct_params_get AcctBalance; assert; int 42; ==")
+ test("txn Sender; acct_params_get AcctMinBalance; assert; int 1001; ==")
+ test("txn Sender; acct_params_get AcctAuthAddr; assert; global ZeroAddress; ==")
+
+ if ep.Proto.LogicSigVersion < 8 {
+		return // the rest uses fields introduced in v8
+ }
+ // No apps or schema at first, then 1 created and the global schema noted
+ test("txn Sender; acct_params_get AcctTotalAppsCreated; assert; !")
+ test("txn Sender; acct_params_get AcctTotalNumUint; assert; !")
+ test("txn Sender; acct_params_get AcctTotalNumByteSlice; assert; !")
+ test("txn Sender; acct_params_get AcctTotalExtraAppPages; assert; !")
+ ledger.NewApp(tx.Sender, 2000, basics.AppParams{
+ StateSchemas: basics.StateSchemas{
+ LocalStateSchema: basics.StateSchema{
+ NumUint: 6,
+ NumByteSlice: 7,
+ },
+ GlobalStateSchema: basics.StateSchema{
+ NumUint: 8,
+ NumByteSlice: 9,
+ },
+ },
+ ExtraProgramPages: 2,
+ })
+ test("txn Sender; acct_params_get AcctTotalAppsCreated; assert; int 1; ==")
+ test("txn Sender; acct_params_get AcctTotalNumUint; assert; int 8; ==")
+ test("txn Sender; acct_params_get AcctTotalNumByteSlice; assert; int 9; ==")
+ test("txn Sender; acct_params_get AcctTotalExtraAppPages; assert; int 2; ==")
+
+ // Not opted in at first, then opted into 1, schema added
+ test("txn Sender; acct_params_get AcctTotalAppsOptedIn; assert; !")
+ ledger.NewLocals(tx.Sender, 2000)
+ test("txn Sender; acct_params_get AcctTotalAppsOptedIn; assert; int 1; ==")
+ test("txn Sender; acct_params_get AcctTotalNumUint; assert; int 8; int 6; +; ==")
+ test("txn Sender; acct_params_get AcctTotalNumByteSlice; assert; int 9; int 7; +; ==")
+
+		// No ASAs at first, then 1 created, which also counts toward the total held
+ test("txn Sender; acct_params_get AcctTotalAssetsCreated; assert; !")
+ test("txn Sender; acct_params_get AcctTotalAssets; assert; !")
+ ledger.NewAsset(tx.Sender, 3000, basics.AssetParams{})
+ test("txn Sender; acct_params_get AcctTotalAssetsCreated; assert; int 1; ==")
+ test("txn Sender; acct_params_get AcctTotalAssets; assert; int 1; ==")
+ })
}
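+
+// Worked out, the schema arithmetic asserted above (editor's summary): an
+// account's totals count the global schema of every app it created plus the
+// local schema of every app it is opted into, so with the app above:
+//
+//	AcctTotalNumUint      = 8 (global, created) + 6 (local, opted in) = 14
+//	AcctTotalNumByteSlice = 9 (global, created) + 7 (local, opted in) = 16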
+// TestGlobalNonDelete ensures that a deletion is not inserted in the delta if the global didn't exist
func TestGlobalNonDelete(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, txn, ledger := makeSampleEnv()
- source := `
-byte "none"
-app_global_del
-int 1
-`
- ledger.NewApp(txn.Sender, 888, makeApp(0, 0, 1, 0))
- delta := testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Empty(t, delta.LocalDeltas)
+ testLogicRange(t, 2, 0, func(t *testing.T, ep *EvalParams, txn *transactions.Transaction, ledger *Ledger) {
+ source := `byte "none"; app_global_del; int 1`
+ ledger.NewApp(txn.Sender, 888, makeApp(0, 0, 1, 0))
+ delta := testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
+ })
}
+// TestLocalNonDelete ensures that a deletion is not inserted in the delta if the local didn't exist
func TestLocalNonDelete(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, txn, ledger := makeSampleEnv()
- source := `
-int 0
-byte "none"
-app_local_del
-int 1
-`
- ledger.NewAccount(txn.Sender, 100000)
- ledger.NewApp(txn.Sender, 888, makeApp(0, 0, 1, 0))
- ledger.NewLocals(txn.Sender, 888)
- delta := testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Empty(t, delta.LocalDeltas)
+ testLogicRange(t, 2, 0, func(t *testing.T, ep *EvalParams, txn *transactions.Transaction, ledger *Ledger) {
+ source := `int 0; byte "none"; app_local_del; int 1`
+ ledger.NewAccount(txn.Sender, 100000)
+ ledger.NewApp(txn.Sender, 888, makeApp(0, 0, 1, 0))
+ ledger.NewLocals(txn.Sender, 888)
+ delta := testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
+ })
}
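+
+// Both tests above pin down the same delta rule: deleting a key that does
+// not exist must not synthesize a DeleteAction. An editor's sketch of that
+// guard (hypothetical; the real logic lives in the eval/cow code):
+func recordDelete(deltas map[string]basics.ValueDelta, key string, existed bool) {
+	if !existed {
+		return // nothing was there, so nothing goes in the delta
+	}
+	deltas[key] = basics.ValueDelta{Action: basics.DeleteAction}
+}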
func TestAppLocalReadWriteDeleteErrors(t *testing.T) {
@@ -1231,8 +1466,8 @@ func TestAppLocalReadWriteDeleteErrors(t *testing.T) {
t.Parallel()
sourceRead := `intcblock 0 100 0x77 1
-bytecblock 0x414c474f 0x414c474f41
-intc_0 // 0, account idx (txn.Sender)
+bytecblock "ALGO" "ALGOA"
+txn Sender
intc_1 // 100, app id
bytec_0 // key "ALGO"
app_local_get_ex
@@ -1240,7 +1475,7 @@ app_local_get_ex
bnz error
intc_2 // 0x77
==
-intc_0 // 0
+txn Sender
intc_1 // 100
bytec_1 // ALGOA
app_local_get_ex
@@ -1256,36 +1491,29 @@ ok:
intc_3 // 1
`
sourceWrite := `intcblock 0 100 1
-bytecblock 0x414c474f
-intc_0 // 0, account idx (txn.Sender)
+bytecblock "ALGO"
+txn Sender
bytec_0 // key "ALGO"
intc_1 // 100
app_local_put
intc_2 // 1
`
sourceDelete := `intcblock 0 100
-bytecblock 0x414c474f
-intc_0 // account idx
+bytecblock "ALGO"
+txn Sender
bytec_0 // key "ALGO"
app_local_del
intc_1
`
- type cmdtest struct {
- source string
- accNumOffset int
- }
-
- tests := map[string]cmdtest{
- "read": {sourceRead, 20},
- "write": {sourceWrite, 13},
- "delete": {sourceDelete, 12},
+ tests := map[string]string{
+ "read": sourceRead,
+ "write": sourceWrite,
+ "delete": sourceDelete,
}
- for name, cmdtest := range tests {
- name, cmdtest := name, cmdtest
+ for name, source := range tests {
+ name, source := name, source
t.Run(fmt.Sprintf("test=%s", name), func(t *testing.T) {
t.Parallel()
- source := cmdtest.source
- firstCmdOffset := cmdtest.accNumOffset
ops := testProg(t, source, AssemblerMaxVersion)
@@ -1304,14 +1532,6 @@ intc_1
ep.Ledger = ledger
ep.SigLedger = ledger
- saved := ops.Program[firstCmdOffset]
- require.Equal(t, OpsByName[0]["intc_0"].Opcode, saved)
- ops.Program[firstCmdOffset] = OpsByName[0]["intc_1"].Opcode
- _, err = EvalApp(ops.Program, 0, 100, ep)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid Account reference 100")
-
- ops.Program[firstCmdOffset] = saved
_, err = EvalApp(ops.Program, 0, 100, ep)
require.Error(t, err)
require.Contains(t, err.Error(), "is not opted into")
@@ -1347,39 +1567,34 @@ func TestAppLocalStateReadWrite(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- txn := makeSampleAppl(100)
- ep := defaultEvalParams(txn)
- ledger := NewLedger(
- map[basics.Address]uint64{
- txn.Txn.Sender: 1,
- },
- )
- ep.Ledger = ledger
- ep.SigLedger = ledger
- ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{})
- ledger.NewLocals(txn.Txn.Sender, 100)
+ testLogicRange(t, 2, 0, func(t *testing.T, ep *EvalParams, txn *transactions.Transaction, ledger *Ledger) {
+
+ txn.ApplicationID = 100
+ ledger.NewAccount(txn.Sender, 1)
+ ledger.NewApp(txn.Sender, 100, basics.AppParams{})
+ ledger.NewLocals(txn.Sender, 100)
- // write int and bytes values
- source := `int 0 // account
-byte 0x414c474f // key "ALGO"
+ // write int and bytes values
+ source := `txn Sender
+byte "ALGO" // key
int 0x77 // value
app_local_put
-int 0 // account
-byte 0x414c474f41 // key "ALGOA"
-byte 0x414c474f // value
+txn Sender
+byte "ALGOA" // key
+byte "ALGO" // value
app_local_put
-int 0 // account
+txn Sender
int 100 // app id
-byte 0x414c474f41 // key "ALGOA"
+byte "ALGOA" // key
app_local_get_ex
bnz exist
err
exist:
-byte 0x414c474f
+byte "ALGO"
==
-int 0 // account
+txn Sender
int 100 // app id
-byte 0x414c474f // key "ALGO"
+byte "ALGO" // key
app_local_get_ex
bnz exist2
err
@@ -1388,27 +1603,30 @@ int 0x77
==
&&
`
- delta := testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Len(t, delta.LocalDeltas, 1)
-
- require.Len(t, delta.LocalDeltas[0], 2)
- vd := delta.LocalDeltas[0]["ALGO"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x77), vd.Uint)
-
- vd = delta.LocalDeltas[0]["ALGOA"]
- require.Equal(t, basics.SetBytesAction, vd.Action)
- require.Equal(t, "ALGO", vd.Bytes)
-
- // write same value without writing, expect no local delta
- source = `int 0 // account
-byte 0x414c474f // key
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ }
+ delta := testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Len(t, delta.LocalDeltas, 1)
+
+ require.Len(t, delta.LocalDeltas[0], 2)
+ vd := delta.LocalDeltas[0]["ALGO"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x77), vd.Uint)
+
+ vd = delta.LocalDeltas[0]["ALGOA"]
+ require.Equal(t, basics.SetBytesAction, vd.Action)
+ require.Equal(t, "ALGO", vd.Bytes)
+
+ // write same value without writing, expect no local delta
+ source = `txn Sender
+byte "ALGO" // key
int 0x77 // value
app_local_put
-int 0 // account
+txn Sender
int 100 // app id
-byte 0x414c474f // key
+byte "ALGO" // key
app_local_get_ex
bnz exist
err
@@ -1416,73 +1634,82 @@ exist:
int 0x77
==
`
- ledger.Reset()
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGOA")
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGO")
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ }
+ ledger.Reset()
+ ledger.NoLocal(txn.Sender, 100, "ALGOA")
+ ledger.NoLocal(txn.Sender, 100, "ALGO")
- algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
- ledger.NewLocal(txn.Txn.Sender, 100, "ALGO", algoValue)
+ algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
+ ledger.NewLocal(txn.Sender, 100, "ALGO", algoValue)
- delta = testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Empty(t, delta.LocalDeltas)
+ delta = testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
- // write same value after reading, expect no local delta
- source = `int 0 // account
+ // write same value after reading, expect no local delta
+ source = `txn Sender
int 100 // app id
-byte 0x414c474f // key
+byte "ALGO" // key
app_local_get_ex
bnz exist
err
exist:
-int 0 // account
-byte 0x414c474f // key
+txn Sender
+byte "ALGO" // key
int 0x77 // value
app_local_put
-int 0 // account
+txn Sender
int 100 // app id
-byte 0x414c474f // key
+byte "ALGO" // key
app_local_get_ex
bnz exist2
err
exist2:
==
`
- ledger.Reset()
- ledger.NewLocal(txn.Txn.Sender, 100, "ALGO", algoValue)
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGOA")
+ ledger.Reset()
+ ledger.NewLocal(txn.Sender, 100, "ALGO", algoValue)
+ ledger.NoLocal(txn.Sender, 100, "ALGOA")
- delta = testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Empty(t, delta.LocalDeltas)
-
- // write a value and expect local delta change
- source = `int 0 // account
-byte 0x414c474f41 // key "ALGOA"
-int 0x78 // value
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ }
+ delta = testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
+
+ // write a value and expect local delta change
+ source = `txn Sender
+byte "ALGOA" // key
+int 0x78 // value
app_local_put
int 1
`
- ledger.Reset()
- ledger.NewLocal(txn.Txn.Sender, 100, "ALGO", algoValue)
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGOA")
-
- delta = testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Len(t, delta.LocalDeltas, 1)
- require.Len(t, delta.LocalDeltas[0], 1)
- vd = delta.LocalDeltas[0]["ALGOA"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x78), vd.Uint)
-
- // write a value to existing key and expect delta change and reading the new value
- source = `int 0 // account
-byte 0x414c474f // key "ALGO"
+ ledger.Reset()
+ ledger.NewLocal(txn.Sender, 100, "ALGO", algoValue)
+ ledger.NoLocal(txn.Sender, 100, "ALGOA")
+
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ }
+ delta = testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Len(t, delta.LocalDeltas, 1)
+ require.Len(t, delta.LocalDeltas[0], 1)
+ vd = delta.LocalDeltas[0]["ALGOA"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x78), vd.Uint)
+
+ // write a value to existing key and expect delta change and reading the new value
+ source = `txn Sender
+byte "ALGO" // key
int 0x78 // value
app_local_put
-int 0 // account
+txn Sender
int 100 // app id
-byte 0x414c474f // key "ALGO"
+byte "ALGO" // key
app_local_get_ex
bnz exist
err
@@ -1490,124 +1717,143 @@ exist:
int 0x78
==
`
- ledger.Reset()
- ledger.NewLocal(txn.Txn.Sender, 100, "ALGO", algoValue)
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGOA")
-
- delta = testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Len(t, delta.LocalDeltas, 1)
- require.Len(t, delta.LocalDeltas[0], 1)
- vd = delta.LocalDeltas[0]["ALGO"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x78), vd.Uint)
-
- // write a value after read and expect delta change
- source = `int 0 // account
+ ledger.Reset()
+ ledger.NewLocal(txn.Sender, 100, "ALGO", algoValue)
+ ledger.NoLocal(txn.Sender, 100, "ALGOA")
+
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ }
+ delta = testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Len(t, delta.LocalDeltas, 1)
+ require.Len(t, delta.LocalDeltas[0], 1)
+ vd = delta.LocalDeltas[0]["ALGO"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x78), vd.Uint)
+
+ // write a value after read and expect delta change
+ source = `txn Sender
int 100 // app id
-byte 0x414c474f // key "ALGO"
+byte "ALGO" // key
app_local_get_ex
bnz exist
err
exist:
-int 0 // account
-byte 0x414c474f // key "ALGO"
+txn Sender
+byte "ALGO" // key
int 0x78 // value
app_local_put
`
- ledger.Reset()
- ledger.NewLocal(txn.Txn.Sender, 100, "ALGO", algoValue)
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGOA")
-
- delta = testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Len(t, delta.LocalDeltas, 1)
- require.Len(t, delta.LocalDeltas[0], 1)
- vd = delta.LocalDeltas[0]["ALGO"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x78), vd.Uint)
-
- // write a few values and expect delta change only for unique changed
- source = `int 0 // account
-byte 0x414c474f // key "ALGO"
+ ledger.Reset()
+ ledger.NewLocal(txn.Sender, 100, "ALGO", algoValue)
+ ledger.NoLocal(txn.Sender, 100, "ALGOA")
+
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ }
+ delta = testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Len(t, delta.LocalDeltas, 1)
+ require.Len(t, delta.LocalDeltas[0], 1)
+ vd = delta.LocalDeltas[0]["ALGO"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x78), vd.Uint)
+
+ // write a few values and expect delta change only for unique changed
+ source = `txn Sender
+byte "ALGO" // key
int 0x77 // value
app_local_put
-int 0 // account
-byte 0x414c474f // key "ALGO"
+txn Sender
+byte "ALGO" // key
int 0x78 // value
app_local_put
-int 0 // account
-byte 0x414c474f41 // key "ALGOA"
+txn Sender
+byte "ALGOA" // key
int 0x78 // value
app_local_put
-int 1 // account
-byte 0x414c474f // key "ALGO"
+txn Accounts 1
+byte "ALGO" // key
int 0x79 // value
app_local_put
int 1
`
- ledger.Reset()
- ledger.NewLocal(txn.Txn.Sender, 100, "ALGO", algoValue)
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGOA")
+ ledger.Reset()
+ ledger.NewLocal(txn.Sender, 100, "ALGO", algoValue)
+ ledger.NoLocal(txn.Sender, 100, "ALGOA")
- ledger.NewAccount(txn.Txn.Receiver, 500)
- ledger.NewLocals(txn.Txn.Receiver, 100)
+ ledger.NewAccount(txn.Receiver, 500)
+ ledger.NewLocals(txn.Receiver, 100)
- delta = testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Len(t, delta.LocalDeltas, 2)
- require.Len(t, delta.LocalDeltas[0], 2)
- require.Len(t, delta.LocalDeltas[1], 1)
- vd = delta.LocalDeltas[0]["ALGO"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x78), vd.Uint)
-
- vd = delta.LocalDeltas[0]["ALGOA"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x78), vd.Uint)
-
- vd = delta.LocalDeltas[1]["ALGO"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x79), vd.Uint)
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ source = strings.ReplaceAll(source, "txn Accounts 1", "int 1")
+ }
+ delta = testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Len(t, delta.LocalDeltas, 2)
+ require.Len(t, delta.LocalDeltas[0], 2)
+ require.Len(t, delta.LocalDeltas[1], 1)
+ vd = delta.LocalDeltas[0]["ALGO"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x78), vd.Uint)
+
+ vd = delta.LocalDeltas[0]["ALGOA"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x78), vd.Uint)
+
+ vd = delta.LocalDeltas[1]["ALGO"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x79), vd.Uint)
+ })
}
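+
+// The "same value, no delta" cases above follow from write coalescing: a put
+// only lands in the delta when it changes what the ledger would return. An
+// editor's sketch of that guard (hypothetical helper):
+func coalescePut(deltas map[string]basics.ValueDelta, key string, stored basics.TealValue, exists bool, val basics.TealValue) {
+	if exists && stored == val {
+		delete(deltas, key) // re-writing the stored value leaves no trace
+		return
+	}
+	deltas[key] = val.ToValueDelta()
+}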
func TestAppLocalGlobalErrorCases(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, tx, ledger := makeSampleEnv()
- ledger.NewApp(tx.Sender, 888, basics.AppParams{})
+ testLogicRange(t, 2, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ ledger.NewApp(tx.Sender, 888, basics.AppParams{})
- testApp(t, fmt.Sprintf(`byte "%v"; int 1; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen+1)), ep, "key too long")
+ g, l := "app_global_put;", "app_local_put;"
+ sender := "txn Sender;"
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ sender = "int 0;"
+ }
+ testApp(t, fmt.Sprintf(`byte "%v"; int 1;`+g+`int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen+1)), ep, "key too long")
- testApp(t, fmt.Sprintf(`byte "%v"; int 1; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen)), ep)
+ testApp(t, fmt.Sprintf(`byte "%v"; int 1;`+g+`int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen)), ep)
- ledger.NewLocals(tx.Sender, 888)
- testApp(t, fmt.Sprintf(`int 0; byte "%v"; int 1; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen+1)), ep, "key too long")
+ ledger.NewLocals(tx.Sender, 888)
+ testApp(t, fmt.Sprintf(sender+`byte "%v"; int 1;`+l+`int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen+1)), ep, "key too long")
- testApp(t, fmt.Sprintf(`int 0; byte "%v"; int 1; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen)), ep)
+ testApp(t, fmt.Sprintf(sender+`byte "%v"; int 1;`+l+`int 1`, strings.Repeat("v", ep.Proto.MaxAppKeyLen)), ep)
- testApp(t, fmt.Sprintf(`byte "foo"; byte "%v"; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen+1)), ep, "value too long for key")
+ testApp(t, fmt.Sprintf(`byte "foo"; byte "%v";`+g+`int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen+1)), ep, "value too long for key")
- testApp(t, fmt.Sprintf(`byte "foo"; byte "%v"; app_global_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen)), ep)
+ testApp(t, fmt.Sprintf(`byte "foo"; byte "%v";`+g+`int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen)), ep)
- testApp(t, fmt.Sprintf(`int 0; byte "foo"; byte "%v"; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen+1)), ep, "value too long for key")
+ testApp(t, fmt.Sprintf(sender+`byte "foo"; byte "%v";`+l+`int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen+1)), ep, "value too long for key")
- testApp(t, fmt.Sprintf(`int 0; byte "foo"; byte "%v"; app_local_put; int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen)), ep)
+ testApp(t, fmt.Sprintf(sender+`byte "foo"; byte "%v";`+l+`int 1`, strings.Repeat("v", ep.Proto.MaxAppBytesValueLen)), ep)
- ep.Proto.MaxAppSumKeyValueLens = 2 // Override to generate error.
- testApp(t, `byte "foo"; byte "foo"; app_global_put; int 1`, ep, "key/value total too long for key")
+ ep.Proto.MaxAppSumKeyValueLens = 2 // Override to generate error.
+ testApp(t, `byte "foo"; byte "foo";`+g+`int 1`, ep, "key/value total too long for key")
- testApp(t, `int 0; byte "foo"; byte "foo"; app_local_put; int 1`, ep, "key/value total too long for key")
+ testApp(t, sender+`byte "foo"; byte "foo";`+l+`int 1`, ep, "key/value total too long for key")
+ })
}
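+
+// The three limits probed above, restated as an editor's sketch (the real
+// checks live in the eval code; this is only the shape of them):
+func checkStorageLimits(maxKey, maxVal, maxSum int, key, value string) error {
+	switch {
+	case len(key) > maxKey:
+		return fmt.Errorf("key too long")
+	case len(value) > maxVal: // applies to byte values
+		return fmt.Errorf("value too long for key")
+	case maxSum > 0 && len(key)+len(value) > maxSum:
+		return fmt.Errorf("key/value total too long for key")
+	}
+	return nil
+}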
func TestAppGlobalReadWriteDeleteErrors(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
- sourceRead := `int 0
-byte 0x414c474f // key "ALGO"
+ testLogicRange(t, 2, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ v := ep.Proto.LogicSigVersion
+
+ sourceRead := `int 0
+byte "ALGO" // key
app_global_get_ex
bnz ok
err
@@ -1615,39 +1861,16 @@ ok:
int 0x77
==
`
- sourceReadSimple := `byte 0x414c474f // key "ALGO"
-app_global_get
-int 0x77
-==
-`
-
- sourceWrite := `byte 0x414c474f // key "ALGO"
-int 100
-app_global_put
-int 1
-`
- sourceDelete := `byte 0x414c474f // key "ALGO"
-app_global_del
-int 1
-`
- tests := map[string]string{
- "read": sourceRead,
- "reads": sourceReadSimple,
- "write": sourceWrite,
- "delete": sourceDelete,
- }
- for name, source := range tests {
- name, source := name, source
- t.Run(fmt.Sprintf("test=%s", name), func(t *testing.T) {
- t.Parallel()
- ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
-
- ep, txn, ledger := makeSampleEnv()
- txn.ApplicationID = basics.AppIndex(100)
- testAppBytes(t, ops.Program, ep, "no such app")
-
- ledger.NewApp(txn.Sender, 100, makeApp(0, 0, 1, 0))
+ tests := map[string]string{
+ "read": sourceRead,
+ "reads": `byte "ALGO"; app_global_get; int 0x77; ==`,
+ "write": `byte "ALGO"; int 100; app_global_put; int 1`,
+ "delete": `byte "ALGO"; app_global_del; int 1`,
+ }
+ tx.ApplicationID = 100
+ ledger.NewApp(tx.Sender, 100, makeApp(0, 0, 1, 0))
+ for name, source := range tests {
+ ops := testProg(t, source, v)
// a special test for read
if name == "read" {
@@ -1659,56 +1882,58 @@ int 1
delta := testAppBytes(t, ops.Program, ep)
require.Empty(t, delta.LocalDeltas)
- })
- }
+ }
+ })
}
func TestAppGlobalReadWrite(t *testing.T) {
partitiontest.PartitionTest(t)
-
t.Parallel()
- // check writing ints and bytes
- source := `byte 0x414c474f // key "ALGO"
+ for _, bySlot := range []bool{true, false} {
+ testLogicRange(t, 2, 0, func(t *testing.T, ep *EvalParams, txn *transactions.Transaction, ledger *Ledger) {
+
+ // check writing ints and bytes
+ source := `byte "ALGO" // key
int 0x77 // value
app_global_put
-byte 0x414c474f41 // key "ALGOA"
-byte 0x414c474f // value
+byte "ALGOA" // key "ALGOA"
+byte "ALGO" // value
app_global_put
// check simple
-byte 0x414c474f41 // key "ALGOA"
+byte "ALGOA" // key "ALGOA"
app_global_get
-byte 0x414c474f
+byte "ALGO"
==
// check generic with alias
int 0 // current app id alias
-byte 0x414c474f41 // key "ALGOA"
+byte "ALGOA" // key "ALGOA"
app_global_get_ex
bnz ok
err
ok:
-byte 0x414c474f
+byte "ALGO"
==
&&
// check generic with exact app id
-int 1 // ForeignApps index - current app
-byte 0x414c474f41 // key "ALGOA"
+THISAPP
+byte "ALGOA" // key "ALGOA"
app_global_get_ex
bnz ok1
err
ok1:
-byte 0x414c474f
+byte "ALGO"
==
&&
// check simple
-byte 0x414c474f
+byte "ALGO"
app_global_get
int 0x77
==
&&
// check generic with alias
int 0 // ForeignApps index - current app
-byte 0x414c474f
+byte "ALGO"
app_global_get_ex
bnz ok2
err
@@ -1717,8 +1942,8 @@ int 0x77
==
&&
// check generic with exact app id
-int 1 // ForeignApps index - current app
-byte 0x414c474f
+THISAPP
+byte "ALGO"
app_global_get_ex
bnz ok3
err
@@ -1727,137 +1952,148 @@ int 0x77
==
&&
`
- txn := makeSampleAppl(100)
- txn.Txn.ForeignApps = []basics.AppIndex{txn.Txn.ApplicationID}
- ep := defaultEvalParams(txn)
- ledger := NewLedger(
- map[basics.Address]uint64{
- txn.Txn.Sender: 1,
- },
- )
- ep.Ledger = ledger
- ep.SigLedger = ledger
- ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{})
- delta := testApp(t, source, ep)
+ txn.Type = protocol.ApplicationCallTx
+ txn.ApplicationID = 100
+ txn.ForeignApps = []basics.AppIndex{txn.ApplicationID}
+ ledger.NewAccount(txn.Sender, 1)
+ ledger.NewApp(txn.Sender, 100, basics.AppParams{})
- require.Len(t, delta.GlobalDelta, 2)
- require.Empty(t, delta.LocalDeltas)
+ if bySlot {
+ // 100 is in the ForeignApps array, name it by slot
+ source = strings.ReplaceAll(source, "THISAPP", "int 1")
+ } else {
+ // use the actual app number
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ return
+ }
+ source = strings.ReplaceAll(source, "THISAPP", "int 100")
+ }
+ delta := testApp(t, source, ep)
+
+ require.Len(t, delta.GlobalDelta, 2)
+ require.Empty(t, delta.LocalDeltas)
- vd := delta.GlobalDelta["ALGO"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x77), vd.Uint)
+ vd := delta.GlobalDelta["ALGO"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x77), vd.Uint)
- vd = delta.GlobalDelta["ALGOA"]
- require.Equal(t, basics.SetBytesAction, vd.Action)
- require.Equal(t, "ALGO", vd.Bytes)
+ vd = delta.GlobalDelta["ALGOA"]
+ require.Equal(t, basics.SetBytesAction, vd.Action)
+ require.Equal(t, "ALGO", vd.Bytes)
- // write existing value before read
- source = `byte 0x414c474f // key "ALGO"
+ // write existing value before read
+ source = `byte "ALGO" // key
int 0x77 // value
app_global_put
-byte 0x414c474f
+byte "ALGO"
app_global_get
int 0x77
==
`
- ledger.Reset()
- ledger.NoGlobal(100, "ALGOA")
- ledger.NoGlobal(100, "ALGO")
+ ledger.Reset()
+ ledger.NoGlobal(100, "ALGOA")
+ ledger.NoGlobal(100, "ALGO")
- algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
- ledger.NewGlobal(100, "ALGO", algoValue)
+ algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
+ ledger.NewGlobal(100, "ALGO", algoValue)
- delta = testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Empty(t, delta.LocalDeltas)
+ delta = testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
- // write existing value after read
- source = `int 0
-byte 0x414c474f
+ // write existing value after read
+ source = `int 0
+byte "ALGO"
app_global_get_ex
bnz ok
err
ok:
pop
-byte 0x414c474f
+byte "ALGO"
int 0x77
app_global_put
-byte 0x414c474f
+byte "ALGO"
app_global_get
int 0x77
==
`
- ledger.Reset()
- ledger.NoGlobal(100, "ALGOA")
- ledger.NewGlobal(100, "ALGO", algoValue)
+ ledger.Reset()
+ ledger.NoGlobal(100, "ALGOA")
+ ledger.NewGlobal(100, "ALGO", algoValue)
- delta = testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Empty(t, delta.LocalDeltas)
+ delta = testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
- // write new values after and before read
- source = `int 0
-byte 0x414c474f
+ // write new values after and before read
+ source = `int 0
+byte "ALGO"
app_global_get_ex
bnz ok
err
ok:
pop
-byte 0x414c474f
+byte "ALGO"
int 0x78
app_global_put
int 0
-byte 0x414c474f
+byte "ALGO"
app_global_get_ex
bnz ok2
err
ok2:
int 0x78
==
-byte 0x414c474f41
-byte 0x414c474f
+byte "ALGOA"
+byte "ALGO"
app_global_put
int 0
-byte 0x414c474f41
+byte "ALGOA"
app_global_get_ex
bnz ok3
err
ok3:
-byte 0x414c474f
+byte "ALGO"
==
&&
`
- ledger.Reset()
- ledger.NoGlobal(100, "ALGOA")
- ledger.NewGlobal(100, "ALGO", algoValue)
+ ledger.Reset()
+ ledger.NoGlobal(100, "ALGOA")
+ ledger.NewGlobal(100, "ALGO", algoValue)
- delta = testApp(t, source, ep)
+ delta = testApp(t, source, ep)
- require.Len(t, delta.GlobalDelta, 2)
- require.Empty(t, delta.LocalDeltas)
+ require.Len(t, delta.GlobalDelta, 2)
+ require.Empty(t, delta.LocalDeltas)
- vd = delta.GlobalDelta["ALGO"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x78), vd.Uint)
+ vd = delta.GlobalDelta["ALGO"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x78), vd.Uint)
- vd = delta.GlobalDelta["ALGOA"]
- require.Equal(t, basics.SetBytesAction, vd.Action)
- require.Equal(t, "ALGO", vd.Bytes)
+ vd = delta.GlobalDelta["ALGOA"]
+ require.Equal(t, basics.SetBytesAction, vd.Action)
+ require.Equal(t, "ALGO", vd.Bytes)
+ })
+ }
}
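+
+// For clarity (editor's note), the THISAPP placeholder above expands to one
+// of two equivalent programs:
+//
+//	int 1    // bySlot: slot 1 = ForeignApps[0] = 100
+//	int 100  // byID:   direct app ID, directRefEnabledVersion (v4) and later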
func TestAppGlobalReadOtherApp(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- source := `int 2 // ForeignApps index
+ // app_global_get_ex starts in v2
+ for _, bySlot := range []bool{true, false} {
+ testLogicRange(t, 2, 0, func(t *testing.T, ep *EvalParams, txn *transactions.Transaction, ledger *Ledger) {
+ source := `
+OTHERAPP
byte "mykey1"
app_global_get_ex
bz ok1
err
ok1:
pop
-int 2 // ForeignApps index
+OTHERAPP
byte "mykey"
app_global_get_ex
bnz ok2
@@ -1867,24 +2103,35 @@ byte "myval"
==
`
- ep, txn, ledger := makeSampleEnv()
- txn.ApplicationID = 100
- txn.ForeignApps = []basics.AppIndex{txn.ApplicationID, 101}
- ledger.NewAccount(txn.Sender, 1)
- ledger.NewApp(txn.Sender, 100, basics.AppParams{})
+ if bySlot {
+ source = strings.ReplaceAll(source, "OTHERAPP", "int 2")
+ } else {
+ // use the actual app number if allowed
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ return
+ }
+ source = strings.ReplaceAll(source, "OTHERAPP", "int 101")
+ }
- delta := testApp(t, source, ep, "no such app")
- require.Empty(t, delta.GlobalDelta)
- require.Empty(t, delta.LocalDeltas)
+ txn.ApplicationID = 100
+ txn.ForeignApps = []basics.AppIndex{txn.ApplicationID, 101}
+ ledger.NewAccount(txn.Sender, 1)
+ ledger.NewApp(txn.Sender, 100, basics.AppParams{})
- ledger.NewApp(txn.Receiver, 101, basics.AppParams{})
- ledger.NewApp(txn.Receiver, 100, basics.AppParams{}) // this keeps current app id = 100
- algoValue := basics.TealValue{Type: basics.TealBytesType, Bytes: "myval"}
- ledger.NewGlobal(101, "mykey", algoValue)
+ delta := testApp(t, source, ep, "no app 101")
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
- delta = testApp(t, source, ep)
- require.Empty(t, delta.GlobalDelta)
- require.Empty(t, delta.LocalDeltas)
+ ledger.NewApp(txn.Receiver, 101, basics.AppParams{})
+ ledger.NewApp(txn.Receiver, 100, basics.AppParams{}) // this keeps current app id = 100
+ algoValue := basics.TealValue{Type: basics.TealBytesType, Bytes: "myval"}
+ ledger.NewGlobal(101, "mykey", algoValue)
+
+ delta = testApp(t, source, ep)
+ require.Empty(t, delta.GlobalDelta)
+ require.Empty(t, delta.LocalDeltas)
+ })
+ }
}
func TestBlankKey(t *testing.T) {
@@ -1907,39 +2154,40 @@ app_global_get
int 7
==
`
- txn := makeSampleAppl(100)
- ep := defaultEvalParams(txn)
- ledger := NewLedger(nil)
- ledger.NewAccount(txn.Txn.Sender, 1)
- ep.Ledger = ledger
- ep.SigLedger = ledger
- ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{})
-
- delta := testApp(t, source, ep)
- require.Empty(t, delta.LocalDeltas)
+	// start at 3, since the source uses "assert" (added in v3)
+ testLogicRange(t, 3, 0, func(t *testing.T, ep *EvalParams, txn *transactions.Transaction, ledger *Ledger) {
+ txn.ApplicationID = 100
+ ledger.NewAccount(txn.Sender, 1)
+ ledger.NewApp(txn.Sender, 100, basics.AppParams{})
+
+ delta := testApp(t, source, ep)
+ require.Empty(t, delta.LocalDeltas)
+ })
}
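+
+// What TestBlankKey pins down is simply that the empty byte-string is a
+// legal key (editor's note); the pattern reduces to:
+//
+//	byte ""        // blank key
+//	int 7
+//	app_global_put
+//	byte ""
+//	app_global_get
+//	int 7
+//	==             // round-trips through the blank key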
func TestAppGlobalDelete(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- // check write/delete/read
- source := `byte 0x414c474f // key "ALGO"
+ for _, bySlot := range []bool{true, false} {
+ testLogicRange(t, 2, 0, func(t *testing.T, ep *EvalParams, txn *transactions.Transaction, ledger *Ledger) {
+ // check write/delete/read
+ source := `byte "ALGO"
int 0x77 // value
app_global_put
-byte 0x414c474f41 // key "ALGOA"
-byte 0x414c474f
+byte "ALGOA"
+byte "ALGO"
app_global_put
-byte 0x414c474f
+byte "ALGO"
app_global_del
-byte 0x414c474f41
+byte "ALGOA"
app_global_del
int 0
-byte 0x414c474f
+byte "ALGO"
app_global_get_ex
bnz error
int 0
-byte 0x414c474f41
+byte "ALGOA"
app_global_get_ex
bnz error
==
@@ -1949,156 +2197,170 @@ err
ok:
int 1
`
- ep, txn, ledger := makeSampleEnv()
- ledger.NewAccount(txn.Sender, 1)
- txn.ApplicationID = 100
- ledger.NewApp(txn.Sender, 100, basics.AppParams{})
- delta := testApp(t, source, ep)
- require.Len(t, delta.GlobalDelta, 2)
- require.Empty(t, delta.LocalDeltas)
+ ledger.NewAccount(txn.Sender, 1)
+ txn.ApplicationID = 100
+ ledger.NewApp(txn.Sender, 100, basics.AppParams{})
- ledger.Reset()
- ledger.NoGlobal(100, "ALGOA")
- ledger.NoGlobal(100, "ALGO")
+ delta := testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 2)
+ require.Empty(t, delta.LocalDeltas)
- algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
- ledger.NewGlobal(100, "ALGO", algoValue)
+ ledger.Reset()
+ ledger.NoGlobal(100, "ALGOA")
+ ledger.NoGlobal(100, "ALGO")
- // check delete existing
- source = `byte 0x414c474f // key "ALGO"
+ algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
+ ledger.NewGlobal(100, "ALGO", algoValue)
+
+ // check delete existing
+ source = `byte "ALGO"
app_global_del
-int 1
-byte 0x414c474f
+THISAPP
+byte "ALGO"
app_global_get_ex
== // two zeros
`
- txn.ForeignApps = []basics.AppIndex{txn.ApplicationID}
- delta = testApp(t, source, ep)
- require.Len(t, delta.GlobalDelta, 1)
- vd := delta.GlobalDelta["ALGO"]
- require.Equal(t, basics.DeleteAction, vd.Action)
- require.Equal(t, uint64(0), vd.Uint)
- require.Equal(t, "", vd.Bytes)
- require.Equal(t, 0, len(delta.LocalDeltas))
- ledger.Reset()
- ledger.NoGlobal(100, "ALGOA")
- ledger.NoGlobal(100, "ALGO")
+ if bySlot {
+ // 100 is in the ForeignApps array, name it by slot
+ source = strings.ReplaceAll(source, "THISAPP", "int 1")
+ } else {
+ // use the actual app number if allowed
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ return
+ }
+ source = strings.ReplaceAll(source, "THISAPP", "int 100")
+ }
+ txn.ForeignApps = []basics.AppIndex{txn.ApplicationID}
+ delta = testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 1)
+ vd := delta.GlobalDelta["ALGO"]
+ require.Equal(t, basics.DeleteAction, vd.Action)
+ require.Equal(t, uint64(0), vd.Uint)
+ require.Equal(t, "", vd.Bytes)
+ require.Equal(t, 0, len(delta.LocalDeltas))
- ledger.NewGlobal(100, "ALGO", algoValue)
+ ledger.Reset()
+ ledger.NoGlobal(100, "ALGOA")
+ ledger.NoGlobal(100, "ALGO")
- // check delete and write non-existing
- source = `byte 0x414c474f41 // key "ALGOA"
+ ledger.NewGlobal(100, "ALGO", algoValue)
+
+ // check delete and write non-existing
+ source = `byte "ALGOA"
app_global_del
int 0
-byte 0x414c474f41
+byte "ALGOA"
app_global_get_ex
== // two zeros
-byte 0x414c474f41
+byte "ALGOA"
int 0x78
app_global_put
`
- delta = testApp(t, source, ep)
- require.Len(t, delta.GlobalDelta, 1)
- vd = delta.GlobalDelta["ALGOA"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x78), vd.Uint)
- require.Equal(t, "", vd.Bytes)
- require.Empty(t, delta.LocalDeltas)
+ delta = testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 1)
+ vd = delta.GlobalDelta["ALGOA"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x78), vd.Uint)
+ require.Equal(t, "", vd.Bytes)
+ require.Empty(t, delta.LocalDeltas)
- ledger.Reset()
- ledger.NoGlobal(100, "ALGOA")
- ledger.NoGlobal(100, "ALGO")
+ ledger.Reset()
+ ledger.NoGlobal(100, "ALGOA")
+ ledger.NoGlobal(100, "ALGO")
- ledger.NewGlobal(100, "ALGO", algoValue)
+ ledger.NewGlobal(100, "ALGO", algoValue)
- // check delete and write existing
- source = `byte 0x414c474f // key "ALGO"
+ // check delete and write existing
+ source = `byte "ALGO"
app_global_del
-byte 0x414c474f
+byte "ALGO"
int 0x78
app_global_put
int 1
`
- delta = testApp(t, source, ep)
- require.Len(t, delta.GlobalDelta, 1)
- vd = delta.GlobalDelta["ALGO"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Empty(t, delta.LocalDeltas)
+ delta = testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 1)
+ vd = delta.GlobalDelta["ALGO"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Empty(t, delta.LocalDeltas)
- ledger.Reset()
- ledger.Reset()
- ledger.NoGlobal(100, "ALGOA")
- ledger.NoGlobal(100, "ALGO")
+			ledger.Reset()
+ ledger.NoGlobal(100, "ALGOA")
+ ledger.NoGlobal(100, "ALGO")
- ledger.NewGlobal(100, "ALGO", algoValue)
+ ledger.NewGlobal(100, "ALGO", algoValue)
- // check delete,write,delete existing
- source = `byte 0x414c474f // key "ALGO"
+ // check delete,write,delete existing
+ source = `byte "ALGO"
app_global_del
-byte 0x414c474f
+byte "ALGO"
int 0x78
app_global_put
-byte 0x414c474f
+byte "ALGO"
app_global_del
int 1
`
- delta = testApp(t, source, ep)
- require.Len(t, delta.GlobalDelta, 1)
- vd = delta.GlobalDelta["ALGO"]
- require.Equal(t, basics.DeleteAction, vd.Action)
- require.Empty(t, delta.LocalDeltas)
+ delta = testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 1)
+ vd = delta.GlobalDelta["ALGO"]
+ require.Equal(t, basics.DeleteAction, vd.Action)
+ require.Empty(t, delta.LocalDeltas)
- ledger.Reset()
- ledger.Reset()
- ledger.NoGlobal(100, "ALGOA")
- ledger.NoGlobal(100, "ALGO")
+			ledger.Reset()
+ ledger.NoGlobal(100, "ALGOA")
+ ledger.NoGlobal(100, "ALGO")
- ledger.NewGlobal(100, "ALGO", algoValue)
+ ledger.NewGlobal(100, "ALGO", algoValue)
- // check delete, write, delete non-existing
- source = `byte 0x414c474f41 // key "ALGOA"
+ // check delete, write, delete non-existing
+ source = `byte "ALGOA" // key "ALGOA"
app_global_del
-byte 0x414c474f41
+byte "ALGOA"
int 0x78
app_global_put
-byte 0x414c474f41
+byte "ALGOA"
app_global_del
int 1
`
- delta = testApp(t, source, ep)
- require.Len(t, delta.GlobalDelta, 1)
- require.Len(t, delta.LocalDeltas, 0)
+ delta = testApp(t, source, ep)
+ require.Len(t, delta.GlobalDelta, 1)
+ require.Len(t, delta.LocalDeltas, 0)
+ })
+ }
}
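+
+// The delete/put sequences above pin down a "last action wins" rule for the
+// global delta (editor's summary; TestAppLocalDelete below exercises the
+// same rule for locals):
+//
+//	del(existing)            -> DeleteAction
+//	del(existing); put 0x78  -> SetUintAction(0x78)
+//	del(missing);  put 0x78  -> SetUintAction(0x78)
+//	del; put; del (existing) -> DeleteAction
+//	del; put; del (missing)  -> one entry; the test leaves the action unchecked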
func TestAppLocalDelete(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- // check write/delete/read
- source := `int 0 // sender
-byte 0x414c474f // key "ALGO"
+ testLogicRange(t, 2, 0, func(t *testing.T, ep *EvalParams, txn *transactions.Transaction, ledger *Ledger) {
+ // check write/delete/read
+ source := `int 0 // sender
+byte "ALGO"
int 0x77 // value
app_local_put
-int 1
-byte 0x414c474f41 // key "ALGOA"
-byte 0x414c474f
+int 1 // other
+byte "ALGOA" // key "ALGOA"
+byte "ALGO"
app_local_put
int 0 // sender
-byte 0x414c474f
+byte "ALGO"
app_local_del
-int 1
-byte 0x414c474f41
+int 1 // other
+byte "ALGOA"
app_local_del
int 0 // sender
int 0 // app
-byte 0x414c474f
+byte "ALGO"
app_local_get_ex
bnz error
-int 1
+int 1 // other
int 100
-byte 0x414c474f41
+byte "ALGOA"
app_local_get_ex
bnz error
==
@@ -2108,168 +2370,181 @@ err
ok:
int 1
`
- txn := makeSampleAppl(100)
- ep := defaultEvalParams(txn)
- ledger := NewLedger(
- map[basics.Address]uint64{
- txn.Txn.Sender: 1,
- },
- )
- ep.Ledger = ledger
- ep.SigLedger = ledger
- ledger.NewApp(txn.Txn.Sender, 100, basics.AppParams{})
- ledger.NewLocals(txn.Txn.Sender, 100)
- ledger.NewAccount(txn.Txn.Receiver, 1)
- ledger.NewLocals(txn.Txn.Receiver, 100)
-
- ep.Trace = &strings.Builder{}
-
- delta := testApp(t, source, ep)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 2, len(delta.LocalDeltas))
-
- ledger.Reset()
- // test that app_local_put and _app_local_del can use byte addresses
- delta = testApp(t, strings.Replace(source, "int 0 // sender", "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00\"", -1), ep)
- // But won't even compile in old teal
- testProg(t, strings.Replace(source, "int 0 // sender", "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00\"", -1), directRefEnabledVersion-1,
- Expect{4, "app_local_put arg 0 wanted..."}, Expect{11, "app_local_del arg 0 wanted..."})
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 2, len(delta.LocalDeltas))
+ txn.ApplicationID = 100
+ ledger.NewAccount(txn.Sender, 1)
+ ledger.NewApp(txn.Sender, 100, basics.AppParams{})
+ ledger.NewLocals(txn.Sender, 100)
+ ledger.NewAccount(txn.Receiver, 1)
+ ledger.NewLocals(txn.Receiver, 100)
+
+ ep.Trace = &strings.Builder{}
+
+ delta := testApp(t, source, ep)
+ require.Equal(t, 0, len(delta.GlobalDelta))
+ require.Equal(t, 2, len(delta.LocalDeltas))
+ ledger.Reset()
+
+ if ep.Proto.LogicSigVersion >= directRefEnabledVersion {
+ // test that app_local_put and _app_local_del can use byte addresses
+ withBytes := strings.ReplaceAll(source, "int 0 // sender", "txn Sender")
+ withBytes = strings.ReplaceAll(withBytes, "int 1 // other", "txn Accounts 1")
+ delta := testApp(t, withBytes, ep)
+ // But won't even compile in old teal
+ testProg(t, withBytes, directRefEnabledVersion-1,
+ exp(4, "app_local_put arg 0 wanted..."), exp(11, "app_local_del arg 0 wanted..."))
+ require.Equal(t, 0, len(delta.GlobalDelta))
+ require.Equal(t, 2, len(delta.LocalDeltas))
+ ledger.Reset()
+ }
- ledger.Reset()
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGOA")
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGO")
- ledger.NoLocal(txn.Txn.Receiver, 100, "ALGOA")
- ledger.NoLocal(txn.Txn.Receiver, 100, "ALGO")
+ ledger.NoLocal(txn.Sender, 100, "ALGOA")
+ ledger.NoLocal(txn.Sender, 100, "ALGO")
+ ledger.NoLocal(txn.Receiver, 100, "ALGOA")
+ ledger.NoLocal(txn.Receiver, 100, "ALGO")
- algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
- ledger.NewLocal(txn.Txn.Sender, 100, "ALGO", algoValue)
+ algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
+ ledger.NewLocal(txn.Sender, 100, "ALGO", algoValue)
- // check delete existing
- source = `int 0 // account
-byte 0x414c474f // key "ALGO"
+ // check delete existing
+ source = `txn Sender
+byte "ALGO"
app_local_del
-int 0
+txn Sender
int 100
-byte 0x414c474f
+byte "ALGO"
app_local_get_ex
== // two zeros
`
- delta = testApp(t, source, ep)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 1, len(delta.LocalDeltas))
- vd := delta.LocalDeltas[0]["ALGO"]
- require.Equal(t, basics.DeleteAction, vd.Action)
- require.Equal(t, uint64(0), vd.Uint)
- require.Equal(t, "", vd.Bytes)
-
- ledger.Reset()
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGOA")
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGO")
-
- ledger.NewLocal(txn.Txn.Sender, 100, "ALGO", algoValue)
-
- // check delete and write non-existing
- source = `int 0 // account
-byte 0x414c474f41 // key "ALGOA"
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ }
+ delta = testApp(t, source, ep)
+ require.Equal(t, 0, len(delta.GlobalDelta))
+ require.Equal(t, 1, len(delta.LocalDeltas))
+ vd := delta.LocalDeltas[0]["ALGO"]
+ require.Equal(t, basics.DeleteAction, vd.Action)
+ require.Equal(t, uint64(0), vd.Uint)
+ require.Equal(t, "", vd.Bytes)
+
+ ledger.Reset()
+ ledger.NoLocal(txn.Sender, 100, "ALGOA")
+ ledger.NoLocal(txn.Sender, 100, "ALGO")
+
+ ledger.NewLocal(txn.Sender, 100, "ALGO", algoValue)
+
+ // check delete and write non-existing
+ source = `txn Sender
+byte "ALGOA"
app_local_del
+txn Sender
int 0
-int 0
-byte 0x414c474f41
+byte "ALGOA"
app_local_get_ex
== // two zeros
-int 0
-byte 0x414c474f41
+txn Sender
+byte "ALGOA"
int 0x78
app_local_put
`
- delta = testApp(t, source, ep)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 1, len(delta.LocalDeltas))
- vd = delta.LocalDeltas[0]["ALGOA"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x78), vd.Uint)
- require.Equal(t, "", vd.Bytes)
-
- ledger.Reset()
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGOA")
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGO")
-
- ledger.NewLocal(txn.Txn.Sender, 100, "ALGO", algoValue)
-
- // check delete and write existing
- source = `int 0 // account
-byte 0x414c474f // key "ALGO"
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ }
+ delta = testApp(t, source, ep)
+ require.Equal(t, 0, len(delta.GlobalDelta))
+ require.Equal(t, 1, len(delta.LocalDeltas))
+ vd = delta.LocalDeltas[0]["ALGOA"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x78), vd.Uint)
+ require.Equal(t, "", vd.Bytes)
+
+ ledger.Reset()
+ ledger.NoLocal(txn.Sender, 100, "ALGOA")
+ ledger.NoLocal(txn.Sender, 100, "ALGO")
+
+ ledger.NewLocal(txn.Sender, 100, "ALGO", algoValue)
+
+ // check delete and write existing
+ source = `txn Sender
+byte "ALGO"
app_local_del
-int 0
-byte 0x414c474f
+txn Sender
+byte "ALGO"
int 0x78
app_local_put
int 1
`
- delta = testApp(t, source, ep)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 1, len(delta.LocalDeltas))
- vd = delta.LocalDeltas[0]["ALGO"]
- require.Equal(t, basics.SetUintAction, vd.Action)
- require.Equal(t, uint64(0x78), vd.Uint)
- require.Equal(t, "", vd.Bytes)
-
- ledger.Reset()
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGOA")
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGO")
-
- ledger.NewLocal(txn.Txn.Sender, 100, "ALGO", algoValue)
-
- // check delete,write,delete existing
- source = `int 0 // account
-byte 0x414c474f // key "ALGO"
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ }
+ delta = testApp(t, source, ep)
+ require.Equal(t, 0, len(delta.GlobalDelta))
+ require.Equal(t, 1, len(delta.LocalDeltas))
+ vd = delta.LocalDeltas[0]["ALGO"]
+ require.Equal(t, basics.SetUintAction, vd.Action)
+ require.Equal(t, uint64(0x78), vd.Uint)
+ require.Equal(t, "", vd.Bytes)
+
+ ledger.Reset()
+ ledger.NoLocal(txn.Sender, 100, "ALGOA")
+ ledger.NoLocal(txn.Sender, 100, "ALGO")
+
+ ledger.NewLocal(txn.Sender, 100, "ALGO", algoValue)
+
+ // check delete,write,delete existing
+ source = `txn Sender
+byte "ALGO"
app_local_del
-int 0
-byte 0x414c474f
+txn Sender
+byte "ALGO"
int 0x78
app_local_put
-int 0
-byte 0x414c474f
+txn Sender
+byte "ALGO"
app_local_del
int 1
`
- delta = testApp(t, source, ep)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 1, len(delta.LocalDeltas))
- vd = delta.LocalDeltas[0]["ALGO"]
- require.Equal(t, basics.DeleteAction, vd.Action)
- require.Equal(t, uint64(0), vd.Uint)
- require.Equal(t, "", vd.Bytes)
-
- ledger.Reset()
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGOA")
- ledger.NoLocal(txn.Txn.Sender, 100, "ALGO")
-
- ledger.NewLocal(txn.Txn.Sender, 100, "ALGO", algoValue)
-
- // check delete, write, delete non-existing
- source = `int 0 // account
-byte 0x414c474f41 // key "ALGOA"
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ }
+ delta = testApp(t, source, ep)
+ require.Equal(t, 0, len(delta.GlobalDelta))
+ require.Equal(t, 1, len(delta.LocalDeltas))
+ vd = delta.LocalDeltas[0]["ALGO"]
+ require.Equal(t, basics.DeleteAction, vd.Action)
+ require.Equal(t, uint64(0), vd.Uint)
+ require.Equal(t, "", vd.Bytes)
+
+ ledger.Reset()
+ ledger.NoLocal(txn.Sender, 100, "ALGOA")
+ ledger.NoLocal(txn.Sender, 100, "ALGO")
+
+ ledger.NewLocal(txn.Sender, 100, "ALGO", algoValue)
+
+ // check delete, write, delete non-existing
+ source = `txn Sender
+byte "ALGOA"
app_local_del
-int 0
-byte 0x414c474f41
+txn Sender
+byte "ALGOA"
int 0x78
app_local_put
-int 0
-byte 0x414c474f41
+txn Sender
+byte "ALGOA"
app_local_del
int 1
`
- delta = testApp(t, source, ep)
- require.Equal(t, 0, len(delta.GlobalDelta))
- require.Equal(t, 1, len(delta.LocalDeltas))
- require.Equal(t, 1, len(delta.LocalDeltas[0]))
+ if ep.Proto.LogicSigVersion < directRefEnabledVersion {
+ source = strings.ReplaceAll(source, "txn Sender", "int 0")
+ }
+ delta = testApp(t, source, ep)
+ require.Equal(t, 0, len(delta.GlobalDelta))
+ require.Equal(t, 1, len(delta.LocalDeltas))
+ require.Equal(t, 1, len(delta.LocalDeltas[0]))
+ })
}
-func TestEnumFieldErrors(t *testing.T) { // nolint:paralleltest // manipulates globalFieldSpecs
+func TestEnumFieldErrors(t *testing.T) { // nolint:paralleltest // manipulates txnFieldSpecs
partitiontest.PartitionTest(t)
source := `txn Amount`
@@ -2313,7 +2588,7 @@ func TestEnumFieldErrors(t *testing.T) { // nolint:paralleltest // manipulates g
}
ledger.NewAsset(tx.Sender, 55, params)
- source = `int 0
+ source = `txn Sender
int 55
asset_holding_get AssetBalance
assert
@@ -2328,7 +2603,7 @@ assert
testApp(t, source, ep, "AssetBalance expected field type is []byte but got uint64")
- source = `int 0
+ source = `int 55
asset_params_get AssetTotal
assert
`
@@ -2348,10 +2623,10 @@ func TestReturnTypes(t *testing.T) {
t.Parallel()
// Ensure all opcodes return values they are supposed to according to the OpSpecs table
- typeToArg := map[StackType]string{
- StackUint64: "int 1\n",
- StackAny: "int 1\n",
- StackBytes: "byte 0x33343536\n", // Which is the string "3456"
+ typeToArg := map[avmType]string{
+ avmUint64: "int 1\n",
+ avmAny: "int 1\n",
+ avmBytes: "byte 0x33343536\n", // Which is the string "3456"
}
// We try to form a snippet that will test every opcode, by sandwiching it
@@ -2362,40 +2637,52 @@ func TestReturnTypes(t *testing.T) {
// opcodes that need to set up their own stack inputs, a ": at the front of
// the string means "start with an empty stack".
specialCmd := map[string]string{
- "txn": "txn Sender",
- "txna": "txna ApplicationArgs 0",
- "gtxn": "gtxn 0 Sender",
- "gtxna": "gtxna 0 ApplicationArgs 0",
- "global": "global MinTxnFee",
- "gaids": ": int 0; gaids",
- "gloads": ": int 0; gloads 0", // Needs txn index = 0 to work
- "gloadss": ": int 0; int 1; gloadss", // Needs txn index = 0 to work
- "intc": "intcblock 0; intc 0",
- "intc_0": "intcblock 0; intc_0",
- "intc_1": "intcblock 0 0; intc_1",
- "intc_2": "intcblock 0 0 0; intc_2",
- "intc_3": "intcblock 0 0 0 0; intc_3",
- "bytec": "bytecblock 0x32; bytec 0",
- "bytec_0": "bytecblock 0x32; bytec_0",
- "bytec_1": "bytecblock 0x32 0x33; bytec_1",
- "bytec_2": "bytecblock 0x32 0x33 0x34; bytec_2",
- "bytec_3": "bytecblock 0x32 0x33 0x34 0x35; bytec_3",
- "substring": "substring 0 2",
- "extract_uint32": ": byte 0x0102030405; int 1; extract_uint32",
- "extract_uint64": ": byte 0x010203040506070809; int 1; extract_uint64",
- "replace2": ": byte 0x0102030405; byte 0x0809; replace2 2",
- "replace3": ": byte 0x0102030405; int 2; byte 0x0809; replace3",
- "asset_params_get": "asset_params_get AssetUnitName",
- "asset_holding_get": "asset_holding_get AssetBalance",
- "gtxns": "gtxns Sender",
- "gtxnsa": ": int 0; gtxnsa ApplicationArgs 0",
+ "txn": "txn Sender",
+ "txna": "txna ApplicationArgs 0",
+ "gtxn": "gtxn 0 Sender",
+ "gtxna": "gtxna 0 ApplicationArgs 0",
+ "global": "global MinTxnFee",
+ "gaids": ": int 0; gaids",
+ "gloads": ": int 0; gloads 0", // Needs txn index = 0 to work
+ "gloadss": ": int 0; int 1; gloadss", // Needs txn index = 0 to work
+ "intc": "intcblock 0; intc 0",
+ "intc_0": "intcblock 0; intc_0",
+ "intc_1": "intcblock 0 0; intc_1",
+ "intc_2": "intcblock 0 0 0; intc_2",
+ "intc_3": "intcblock 0 0 0 0; intc_3",
+ "bytec": "bytecblock 0x32; bytec 0",
+ "bytec_0": "bytecblock 0x32; bytec_0",
+ "bytec_1": "bytecblock 0x32 0x33; bytec_1",
+ "bytec_2": "bytecblock 0x32 0x33 0x34; bytec_2",
+ "bytec_3": "bytecblock 0x32 0x33 0x34 0x35; bytec_3",
+ "substring": "substring 0 2",
+ "extract_uint32": ": byte 0x0102030405; int 1; extract_uint32",
+ "extract_uint64": ": byte 0x010203040506070809; int 1; extract_uint64",
+ "replace2": ": byte 0x0102030405; byte 0x0809; replace2 2",
+ "replace3": ": byte 0x0102030405; int 2; byte 0x0809; replace3",
+ "gtxns": "gtxns Sender",
+ "gtxnsa": ": int 0; gtxnsa ApplicationArgs 0",
+ "extract": "extract 0 2",
+ "txnas": "txnas ApplicationArgs",
+ "gtxnas": "gtxnas 0 ApplicationArgs",
+ "gtxnsas": ": int 0; int 0; gtxnsas ApplicationArgs",
+ "divw": ": int 1; int 2; int 3; divw",
+
+ // opcodes that require addresses, not just bytes
+ "balance": ": txn Sender; balance",
+ "min_balance": ": txn Sender; min_balance",
+ "acct_params_get": ": txn Sender; acct_params_get AcctMinBalance",
+
+ // Use "bury" here to take advantage of args pushed on stack by test
+ "app_local_get": "txn Accounts 1; bury 2; app_local_get",
+ "app_local_get_ex": "txn Accounts 1; bury 3; app_local_get_ex",
+ "app_local_del": "txn Accounts 1; bury 2; app_local_del",
+ "app_local_put": "txn Accounts 1; bury 3; app_local_put",
+ "app_opted_in": "txn Sender; bury 2; app_opted_in",
+
+ "asset_params_get": ": int 400; asset_params_get AssetUnitName",
+ "asset_holding_get": ": txn Sender; int 400; asset_holding_get AssetBalance",
"app_params_get": "app_params_get AppGlobalNumUint",
- "acct_params_get": "acct_params_get AcctMinBalance",
- "extract": "extract 0 2",
- "txnas": "txnas ApplicationArgs",
- "gtxnas": "gtxnas 0 ApplicationArgs",
- "gtxnsas": ": int 0; int 0; gtxnsas ApplicationArgs",
- "divw": ": int 1; int 2; int 3; divw",
"itxn_field": "itxn_begin; itxn_field TypeEnum",
"itxn_next": "itxn_begin; int pay; itxn_field TypeEnum; itxn_next",
@@ -2497,7 +2784,7 @@ func TestReturnTypes(t *testing.T) {
var sb strings.Builder
if provideStackInput {
for _, t := range spec.Arg.Types {
- sb.WriteString(typeToArg[t])
+ sb.WriteString(typeToArg[t.AVMType])
}
}
sb.WriteString(cmd + "\n")
@@ -2506,9 +2793,9 @@ func TestReturnTypes(t *testing.T) {
ep, tx, ledger := makeSampleEnv()
tx.Type = protocol.ApplicationCallTx
- tx.ApplicationID = 1
+ tx.ApplicationID = 300
tx.ForeignApps = []basics.AppIndex{tx.ApplicationID}
- tx.ForeignAssets = []basics.AssetIndex{basics.AssetIndex(1), basics.AssetIndex(1)}
+ tx.ForeignAssets = []basics.AssetIndex{400}
tx.Boxes = []transactions.BoxRef{{
Name: []byte("3456"),
}}
@@ -2536,20 +2823,20 @@ func TestReturnTypes(t *testing.T) {
Freeze: tx.Receiver,
Clawback: tx.Receiver,
}
- ledger.NewAsset(tx.Sender, 1, params)
- ledger.NewApp(tx.Sender, 1, basics.AppParams{})
+ ledger.NewAsset(tx.Sender, 400, params)
+ ledger.NewApp(tx.Sender, 300, basics.AppParams{})
ledger.NewAccount(tx.Receiver, 1000000)
- ledger.NewLocals(tx.Receiver, 1)
+ ledger.NewLocals(tx.Receiver, 300)
key, err := hex.DecodeString("33343536")
require.NoError(t, err)
algoValue := basics.TealValue{Type: basics.TealUintType, Uint: 0x77}
- ledger.NewLocal(tx.Receiver, 1, string(key), algoValue)
- ledger.NewAccount(appAddr(1), 1000000)
+ ledger.NewLocal(tx.Receiver, 300, string(key), algoValue)
+ ledger.NewAccount(appAddr(300), 1000000)
ep.reset() // for Trace and budget isolation
ep.pastScratch[0] = &scratchSpace{} // for gload
// these allow the box_* opcodes to work
- ledger.CreateBox(1, "3456", 10)
+ ledger.CreateBox(300, "3456", 10)
ep.ioBudget = 50
cx := EvalContext{
@@ -2557,7 +2844,7 @@ func TestReturnTypes(t *testing.T) {
runModeFlags: m,
groupIndex: 1,
txn: &ep.TxnGroup[1],
- appID: 1,
+ appID: 300,
}
// These set conditions for some ops that examine the group.
@@ -2580,10 +2867,10 @@ func TestReturnTypes(t *testing.T) {
}
require.Len(t, cx.stack, len(spec.Return.Types), "%s", ep.Trace)
for i := 0; i < len(spec.Return.Types); i++ {
- stackType := cx.stack[i].argType()
+ stackType := cx.stack[i].stackType()
retType := spec.Return.Types[i]
require.True(
- t, typecheck(retType, stackType),
+ t, stackType.overlaps(retType),
"%s expected to return %s but actual is %s", spec.Name, retType, stackType,
)
}
@@ -2613,9 +2900,9 @@ func TestTxnEffects(t *testing.T) {
// Look past the logs of tx 0
testApps(t, []string{"byte 0x37; log; int 1", "gtxna 0 Logs 1; byte 0x37; =="}, nil, AssemblerMaxVersion, nil,
- Expect{1, "invalid Logs index 1"})
+ exp(1, "invalid Logs index 1"))
testApps(t, []string{"byte 0x37; log; int 1", "int 6; gtxnas 0 Logs; byte 0x37; =="}, nil, AssemblerMaxVersion, nil,
- Expect{1, "invalid Logs index 6"})
+ exp(1, "invalid Logs index 6"))
}
func TestRound(t *testing.T) {
@@ -2715,7 +3002,7 @@ func TestPooledAppCallsVerifyOp(t *testing.T) {
call := transactions.SignedTxn{Txn: transactions.Transaction{Type: protocol.ApplicationCallTx}}
// Simulate test with 2 grouped txn
testApps(t, []string{source, ""}, []transactions.SignedTxn{call, call}, LogicVersion, ledger,
- Expect{0, "pc=107 dynamic cost budget exceeded, executing ed25519verify: local program cost was 5"})
+ exp(0, "pc=107 dynamic cost budget exceeded, executing ed25519verify: local program cost was 5"))
// Simulate test with 3 grouped txn
testApps(t, []string{source, "", ""}, []transactions.SignedTxn{call, call, call}, LogicVersion, ledger)
@@ -2762,11 +3049,11 @@ int 695
testApp(t, source, defaultEvalParams())
}
-func TestSelfMutate(t *testing.T) {
+func TestSelfMutateV8(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ep, _, ledger := makeSampleEnv()
+ ep, _, ledger := makeSampleEnvWithVersion(8)
/* In order to test the added protection of mutableAccountReference, we're
going to set up a ledger in which an app account is opted into
@@ -2791,7 +3078,7 @@ app_local_del
`
testApp(t, source, ep, "invalid Account reference for mutation")
- /* But let's just check normal access is working properly. */
+ /* But let's just check read access is working properly. */
source = `
global CurrentApplicationAddress
byte "hey"
@@ -2801,3 +3088,178 @@ int 77
`
testApp(t, source, ep)
}
+
+// TestSelfMutateV9AndUp tests that apps can mutate their own local state
+// starting with v9. It also checks the EvalDelta that is created.
+func TestSelfMutateV9AndUp(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // start at 9, when such mutation became legal
+ testLogicRange(t, 9, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ /* In order to test that apps can now mutate their own app's local state,
+ we're going to set up a ledger in which an app account is opted into
+ itself. */
+ ledger.NewLocals(basics.AppIndex(888).Address(), 888)
+ ledger.NewLocal(basics.AppIndex(888).Address(), 888, "hey",
+ basics.TealValue{Type: basics.TealUintType, Uint: 77})
+
+ // and we'll modify the passed account's locals, to better check the ED
+ ledger.NewLocals(tx.Accounts[0], 888)
+
+ source := `
+global CurrentApplicationAddress
+byte "hey"
+int 42
+app_local_put
+txn Accounts 1
+byte "acct"
+int 43
+app_local_put
+int 1
+`
+ ed := testApp(t, source, ep)
+	require.Len(t, tx.Accounts, 1) // Sender is index 0 and the 1 tx.Accounts entry is index 1, so the shared app account lands at LocalDelta index 2
+ require.Equal(t, map[uint64]basics.StateDelta{
+ 1: {
+ "acct": {
+ Action: basics.SetUintAction,
+ Uint: 43,
+ },
+ },
+ 2: {
+ "hey": {
+ Action: basics.SetUintAction,
+ Uint: 42,
+ },
+ },
+ }, ed.LocalDeltas)
+ require.Equal(t, []basics.Address{tx.ApplicationID.Address()}, ed.SharedAccts)
+
+ /* Confirm it worked. */
+ source = `
+global CurrentApplicationAddress
+byte "hey"
+app_local_get
+int 42
+==
+`
+ testApp(t, source, ep)
+
+ source = `
+global CurrentApplicationAddress
+byte "hey"
+int 10
+app_local_put // this will get wiped out by del
+global CurrentApplicationAddress
+byte "hey"
+app_local_del
+txn Accounts 1
+byte "acct"
+int 7
+app_local_put
+int 1
+`
+ ed = testApp(t, source, ep)
+	require.Len(t, tx.Accounts, 1) // Sender is index 0 and the 1 tx.Accounts entry is index 1, so the shared app account lands at LocalDelta index 2
+ require.Equal(t, map[uint64]basics.StateDelta{
+ 1: {
+ "acct": {
+ Action: basics.SetUintAction,
+ Uint: 7,
+ },
+ },
+ 2: {
+ "hey": {
+ Action: basics.DeleteAction,
+ },
+ },
+ }, ed.LocalDeltas)
+ require.Equal(t, []basics.Address{tx.ApplicationID.Address()}, ed.SharedAccts)
+
+	// Now, repeat the "put" test with multiple keys, to ensure only one
+	// address is added to SharedAccts. We also modify the Sender's locals, to
+	// better check the ED.
+ ledger.NewLocals(tx.Sender, 888)
+
+ source = `
+txn Sender
+byte "hey"
+int 40
+app_local_put
+
+global CurrentApplicationAddress
+byte "hey"
+int 42
+app_local_put
+
+global CurrentApplicationAddress
+byte "joe"
+int 21
+app_local_put
+int 1
+`
+ ed = testApp(t, source, ep)
+	require.Len(t, tx.Accounts, 1) // Sender is index 0 and the 1 tx.Accounts entry is index 1, so the shared app account lands at LocalDelta index 2
+ require.Equal(t, map[uint64]basics.StateDelta{
+ 0: {
+ "hey": {
+ Action: basics.SetUintAction,
+ Uint: 40,
+ },
+ },
+ 2: {
+ "hey": {
+ Action: basics.SetUintAction,
+ Uint: 42,
+ },
+ "joe": {
+ Action: basics.SetUintAction,
+ Uint: 21,
+ },
+ },
+ }, ed.LocalDeltas)
+
+ require.Equal(t, []basics.Address{tx.ApplicationID.Address()}, ed.SharedAccts)
+ })
+}
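
The asserted indices follow the EvalDelta convention this test exercises: LocalDeltas key 0 is the Sender, keys 1..len(tx.Accounts) are the explicitly passed accounts, and any further addresses the program touched (here the app's own address) are appended after those and recorded in SharedAccts. A sketch of that mapping, assuming exactly this convention:

    // sketch: recover a LocalDeltas key for an address under the convention
    // above: 0 = Sender, 1..N = tx.Accounts, then SharedAccts in order.
    func localDeltaIndex(addr, sender basics.Address, accounts, shared []basics.Address) (uint64, bool) {
        if addr == sender {
            return 0, true
        }
        for i, a := range accounts {
            if a == addr {
                return uint64(i + 1), true
            }
        }
        for i, a := range shared {
            if a == addr {
                return uint64(len(accounts) + 1 + i), true
            }
        }
        return 0, false // address was not available to the program
    }
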
+
+func TestInfiniteRecursion(t *testing.T) { // nolint:paralleltest // manipulates maxAppCallDepth
+ partitiontest.PartitionTest(t)
+
+ // test needs AppApprovalProgram, available in 7
+	testLogicRange(t, 7, 0, func(t *testing.T, ep *EvalParams, tx *transactions.Transaction, ledger *Ledger) {
+ v := ep.Proto.LogicSigVersion
+ source := `
+itxn_begin
+int appl; itxn_field TypeEnum
+int 0; app_params_get AppApprovalProgram
+assert
+itxn_field ApprovalProgram
+
+int 0; app_params_get AppClearStateProgram
+assert
+itxn_field ClearStateProgram
+
+itxn_submit
+`
+ // This app looks itself up in the ledger, so we need to put it in there.
+ ledger.NewApp(tx.Sender, 888, basics.AppParams{
+ ApprovalProgram: testProg(t, source, v).Program,
+ ClearStateProgram: testProg(t, "int 1", v).Program,
+ })
+	// We're testing whether this can recur forever. It's hard to fund all these
+ // apps, but we can put a huge credit in the ep.
+ *ep.FeeCredit = 1_000_000_000
+
+ testApp(t, source, ep, "appl depth (8) exceeded")
+
+ was := maxAppCallDepth
+ defer func() {
+ maxAppCallDepth = was
+ }()
+ maxAppCallDepth = 10_000_000
+
+ testApp(t, source, ep, "too many inner transactions 1 with 0 left")
+ })
+}
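
The save/override/defer-restore dance on maxAppCallDepth is why this test carries the nolint:paralleltest tag instead of calling t.Parallel(): the variable is package-level state shared across tests. The same pattern in generic form (an illustrative helper, not part of the package):

    // sketch: temporarily override a package-level knob for the duration
    // of body. Tests that do this must not run in parallel.
    func withOverride[T any](knob *T, value T, body func()) {
        saved := *knob
        *knob = value
        defer func() { *knob = saved }()
        body()
    }

With such a helper, the tail of the test would read withOverride(&maxAppCallDepth, 10_000_000, func() { testApp(t, source, ep, "too many inner transactions 1 with 0 left") }).
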
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index 5d5f32128..de99cab3a 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -168,12 +168,9 @@ func (ep *EvalParams) reset() {
ep.TxnGroup[i].ApplyData = transactions.ApplyData{}
}
if ep.available != nil {
- ep.available.apps = nil
- ep.available.asas = nil
- // reinitialize boxes because evaluation can add box refs for app creates.
available := NewEvalParams(ep.TxnGroup, ep.Proto, ep.Specials).available
if available != nil {
- ep.available.boxes = available.boxes
+ ep.available = available
}
ep.available.dirtyBytes = 0
}
@@ -624,7 +621,7 @@ after:
dup
pop
`
- testProg(t, code, LogicVersion, Expect{12, "+ expects 2 stack arguments..."})
+ testProg(t, code, LogicVersion, exp(12, "+ expects 2 stack arguments..."))
testAccepts(t, notrack(code), 1)
}
@@ -1153,6 +1150,10 @@ const globalV9TestProgram = globalV8TestProgram + `
// No new globals in v9
`
+const globalV10TestProgram = globalV9TestProgram + `
+// No new globals in v10
+`
+
func TestGlobal(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1163,16 +1164,17 @@ func TestGlobal(t *testing.T) {
}
// Associate the highest allowed global constant with each version's test program
tests := map[uint64]desc{
- 0: {GroupSize, globalV1TestProgram},
- 1: {GroupSize, globalV1TestProgram},
- 2: {CurrentApplicationID, globalV2TestProgram},
- 3: {CreatorAddress, globalV3TestProgram},
- 4: {CreatorAddress, globalV4TestProgram},
- 5: {GroupID, globalV5TestProgram},
- 6: {CallerApplicationAddress, globalV6TestProgram},
- 7: {CallerApplicationAddress, globalV7TestProgram},
- 8: {CallerApplicationAddress, globalV8TestProgram},
- 9: {CallerApplicationAddress, globalV9TestProgram},
+ 0: {GroupSize, globalV1TestProgram},
+ 1: {GroupSize, globalV1TestProgram},
+ 2: {CurrentApplicationID, globalV2TestProgram},
+ 3: {CreatorAddress, globalV3TestProgram},
+ 4: {CreatorAddress, globalV4TestProgram},
+ 5: {GroupID, globalV5TestProgram},
+ 6: {CallerApplicationAddress, globalV6TestProgram},
+ 7: {CallerApplicationAddress, globalV7TestProgram},
+ 8: {CallerApplicationAddress, globalV8TestProgram},
+ 9: {CallerApplicationAddress, globalV9TestProgram},
+ 10: {CallerApplicationAddress, globalV10TestProgram},
}
// tests keys are versions so they must be in a range 1..AssemblerMaxVersion plus zero version
require.LessOrEqual(t, len(tests), AssemblerMaxVersion+1)
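
Each globalVnTestProgram is the previous version's program plus checks for any newly introduced globals, so every later version re-verifies all earlier fields for free. Extending the table for a hypothetical v11 would follow the same two steps:

    // hypothetical v11 extension, mirroring the v10 entry above:
    const globalV11TestProgram = globalV10TestProgram + `
    // checks for globals introduced in v11 would go here
    `
    // plus a table entry: 11: {CallerApplicationAddress, globalV11TestProgram},
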
@@ -1676,6 +1678,11 @@ assert
int 1
`
+const testTxnProgramTextV10 = testTxnProgramTextV9 + `
+assert
+int 1
+`
+
func makeSampleTxn() transactions.SignedTxn {
var txn transactions.SignedTxn
copy(txn.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"))
@@ -1779,15 +1786,16 @@ func TestTxn(t *testing.T) {
t.Parallel()
tests := map[uint64]string{
- 1: testTxnProgramTextV1,
- 2: testTxnProgramTextV2,
- 3: testTxnProgramTextV3,
- 4: testTxnProgramTextV4,
- 5: testTxnProgramTextV5,
- 6: testTxnProgramTextV6,
- 7: testTxnProgramTextV7,
- 8: testTxnProgramTextV8,
- 9: testTxnProgramTextV9,
+ 1: testTxnProgramTextV1,
+ 2: testTxnProgramTextV2,
+ 3: testTxnProgramTextV3,
+ 4: testTxnProgramTextV4,
+ 5: testTxnProgramTextV5,
+ 6: testTxnProgramTextV6,
+ 7: testTxnProgramTextV7,
+ 8: testTxnProgramTextV8,
+ 9: testTxnProgramTextV9,
+ 10: testTxnProgramTextV10,
}
for i, txnField := range TxnFieldNames {
@@ -2466,17 +2474,17 @@ func TestSubstringFlop(t *testing.T) {
// fails in compiler
testProg(t, `byte 0xf000000000000000
substring
-len`, 2, Expect{2, "substring expects 2 immediate arguments"})
+len`, 2, exp(2, "substring expects 2 immediate arguments"))
// fails in compiler
testProg(t, `byte 0xf000000000000000
substring 1
-len`, 2, Expect{2, "substring expects 2 immediate arguments"})
+len`, 2, exp(2, "substring expects 2 immediate arguments"))
// fails in compiler
testProg(t, `byte 0xf000000000000000
substring 4 2
-len`, 2, Expect{2, "substring end is before start"})
+len`, 2, exp(2, "substring end is before start"))
// fails at runtime
testPanics(t, `byte 0xf000000000000000
@@ -2531,17 +2539,17 @@ func TestExtractFlop(t *testing.T) {
// fails in compiler
testProg(t, `byte 0xf000000000000000
extract
- len`, 5, Expect{2, "extract without immediates expects 3 stack arguments but stack height is 1"})
+ len`, 5, exp(2, "extract without immediates expects 3 stack arguments but stack height is 1"))
testProg(t, `byte 0xf000000000000000
extract 1
- len`, 5, Expect{2, "extract expects 0 or 2 immediate arguments"})
+ len`, 5, exp(2, "extract expects 0 or 2 immediate arguments"))
testProg(t, `byte 0xf000000000000000
int 0
int 5
extract3 1 2
- len`, 5, Expect{4, "extract3 expects 0 immediate arguments"})
+ len`, 5, exp(4, "extract3 expects 0 immediate arguments"))
// fails at runtime
err := testPanics(t, `byte 0xf000000000000000
@@ -2743,7 +2751,7 @@ func TestGload(t *testing.T) {
}
if testCase.errContains != "" {
- testApps(t, sources, txgroup, LogicVersion, nil, Expect{testCase.errTxn, testCase.errContains})
+ testApps(t, sources, txgroup, LogicVersion, nil, exp(testCase.errTxn, testCase.errContains))
} else {
testApps(t, sources, txgroup, LogicVersion, nil)
}
@@ -2964,7 +2972,7 @@ func isNotPanic(t *testing.T, err error) {
if err == nil {
return
}
- if pe, ok := err.(PanicError); ok {
+ if pe, ok := err.(panicError); ok {
t.Error(pe)
}
}
@@ -3191,14 +3199,10 @@ func TestPanic(t *testing.T) { //nolint:paralleltest // Uses withPanicOpcode
params := defaultEvalParams()
params.TxnGroup[0].Lsig.Logic = ops.Program
err := CheckSignature(0, params)
- require.Error(t, err)
- if pe, ok := err.(PanicError); ok {
- require.Equal(t, panicString, pe.PanicValue)
- pes := pe.Error()
- require.True(t, strings.Contains(pes, "panic"))
- } else {
- t.Errorf("expected PanicError object but got %T %#v", err, err)
- }
+ var pe panicError
+ require.ErrorAs(t, err, &pe)
+ require.Equal(t, panicString, pe.PanicValue)
+ require.ErrorContains(t, pe, "panic")
var txn transactions.SignedTxn
txn.Lsig.Logic = ops.Program
@@ -3209,13 +3213,9 @@ func TestPanic(t *testing.T) { //nolint:paralleltest // Uses withPanicOpcode
t.Log(params.Trace.String())
}
require.False(t, pass)
- if pe, ok := err.(PanicError); ok {
- require.Equal(t, panicString, pe.PanicValue)
- pes := pe.Error()
- require.True(t, strings.Contains(pes, "panic"))
- } else {
- t.Errorf("expected PanicError object but got %T %#v", err, err)
- }
+ require.ErrorAs(t, err, &pe)
+ require.Equal(t, panicString, pe.PanicValue)
+ require.ErrorContains(t, pe, "panic")
if v >= appsEnabledVersion {
txn = transactions.SignedTxn{
@@ -3227,13 +3227,9 @@ func TestPanic(t *testing.T) { //nolint:paralleltest // Uses withPanicOpcode
params.Ledger = NewLedger(nil)
pass, err = EvalApp(ops.Program, 0, 1, params)
require.False(t, pass)
- if pe, ok := err.(PanicError); ok {
- require.Equal(t, panicString, pe.PanicValue)
- pes := pe.Error()
- require.True(t, strings.Contains(pes, "panic"))
- } else {
- t.Errorf("expected PanicError object but got %T %#v", err, err)
- }
+ require.ErrorAs(t, err, &pe)
+ require.Equal(t, panicString, pe.PanicValue)
+ require.ErrorContains(t, pe, "panic")
}
})
})
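
The rewrite replaces a hand-rolled type assertion with require.ErrorAs and require.ErrorContains, which also see through wrapped errors via errors.As. The plain-stdlib equivalent of the new assertions, for reference:

    // stdlib equivalent of the require.ErrorAs + require.ErrorContains pair:
    var pe panicError
    if !errors.As(err, &pe) {
        t.Fatalf("expected panicError, got %T %#v", err, err)
    }
    if pe.PanicValue != panicString {
        t.Fatalf("wrong panic value: %v", pe.PanicValue)
    }
    if !strings.Contains(pe.Error(), "panic") {
        t.Fatalf("error text lacks \"panic\": %v", pe)
    }
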
@@ -3378,10 +3374,11 @@ intc_1
import random
def foo():
- for i in range(64):
- print('int {}'.format(random.randint(0,0x01ffffffffffffff)))
- for i in range(63):
- print('+')
+
+ for i in range(64):
+ print('int {}'.format(random.randint(0,0x01ffffffffffffff)))
+ for i in range(63):
+ print('+')
*/
const addBenchmarkSource = `int 20472989571761113
int 80135167795737348
@@ -3516,10 +3513,11 @@ int 28939890412103745
import random
def foo():
- print('int {}'.format(random.randint(0,0x01ffffffffffffff)))
- for i in range(63):
- print('int {}'.format(random.randint(0,0x01ffffffffffffff)))
- print('+')
+
+ print('int {}'.format(random.randint(0,0x01ffffffffffffff)))
+ for i in range(63):
+ print('int {}'.format(random.randint(0,0x01ffffffffffffff)))
+ print('+')
*/
const addBenchmark2Source = `int 8371863094338737
int 29595196041051360
@@ -4172,13 +4170,13 @@ func TestArgType(t *testing.T) {
t.Parallel()
var sv stackValue
- require.Equal(t, StackUint64, sv.argType())
+ require.Equal(t, avmUint64, sv.avmType())
sv.Bytes = []byte("")
- require.Equal(t, StackBytes, sv.argType())
+ require.Equal(t, avmBytes, sv.avmType())
sv.Uint = 1
- require.Equal(t, StackBytes, sv.argType())
+ require.Equal(t, avmBytes, sv.avmType())
sv.Bytes = nil
- require.Equal(t, StackUint64, sv.argType())
+ require.Equal(t, avmUint64, sv.avmType())
}
func TestApplicationsDisallowOldTeal(t *testing.T) {
@@ -4312,7 +4310,7 @@ func TestAllowedOpcodesV2(t *testing.T) {
"gtxn": true,
}
- ep := defaultEvalParams()
+ ep := defaultEvalParamsWithVersion(2)
cnt := 0
for _, spec := range OpSpecs {
@@ -4320,7 +4318,7 @@ func TestAllowedOpcodesV2(t *testing.T) {
source, ok := tests[spec.Name]
require.True(t, ok, "Missed opcode in the test: %s", spec.Name)
require.Contains(t, source, spec.Name)
- ops := testProg(t, source, AssemblerMaxVersion)
+ ops := testProg(t, source, 2)
// all opcodes allowed in stateful mode so use CheckStateful/EvalContract
err := CheckContract(ops.Program, ep)
require.NoError(t, err, source)
@@ -4365,7 +4363,7 @@ func TestAllowedOpcodesV3(t *testing.T) {
"pushbytes": `pushbytes "stringsfail?"`,
}
- ep := defaultEvalParams()
+ ep := defaultEvalParamsWithVersion(3)
cnt := 0
for _, spec := range OpSpecs {
@@ -4373,7 +4371,7 @@ func TestAllowedOpcodesV3(t *testing.T) {
source, ok := tests[spec.Name]
require.True(t, ok, "Missed opcode in the test: %s", spec.Name)
require.Contains(t, source, spec.Name)
- ops := testProg(t, source, AssemblerMaxVersion)
+ ops := testProg(t, source, 3)
// all opcodes allowed in stateful mode so use CheckStateful/EvalContract
testAppBytes(t, ops.Program, ep, "REJECT")
@@ -4439,7 +4437,7 @@ func testEvaluation(t *testing.T, program string, introduced uint64, tester eval
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
t.Helper()
if v < introduced {
- testProg(t, notrack(program), v, Expect{0, "...was introduced..."})
+ testProg(t, notrack(program), v, exp(0, "...was introduced..."))
return
}
ops := testProg(t, program, v)
@@ -4634,7 +4632,7 @@ func TestBury(t *testing.T) {
// bury 0 panics
source := "int 3; int 2; int 7; bury 0; int 1; return"
- testProg(t, source, 8, Expect{1, "bury 0 always fails"})
+ testProg(t, source, 8, exp(1, "bury 0 always fails"))
testPanics(t, notrack("int 3; int 2; int 7; bury 0; int 1; return"), 8, "bury outside stack")
// bury 1 pops the ToS and replaces the thing "1 down", which becomes the new ToS
@@ -4919,7 +4917,7 @@ func TestBytesMath(t *testing.T) {
// 64 byte long inputs are accepted, even if they produce longer outputs
testAccepts(t, fmt.Sprintf("byte 0x%s; byte 0x10; b+; len; int 65; ==", effs), 4)
// 65 byte inputs are not ok.
- testPanics(t, fmt.Sprintf("byte 0x%s00; byte 0x10; b-; len; int 65; ==", effs), 4)
+	testPanics(t, notrack(fmt.Sprintf("byte 0x%s00; byte 0x10; b-; len; int 65; ==", effs)), 4)
testAccepts(t, `byte 0x01; byte 0x01; b-; byte ""; ==`, 4)
testAccepts(t, "byte 0x0200; byte 0x01; b-; byte 0x01FF; ==", 4)
@@ -5023,7 +5021,7 @@ func TestBytesBits(t *testing.T) {
testAccepts(t, "int 33; bzero; byte 0x000000000000000000000000000000000000000000000000000000000000000000; ==", 4)
testAccepts(t, "int 4096; bzero; len; int 4096; ==", 4)
- testPanics(t, "int 4097; bzero; len; int 4097; ==", 4)
+	testPanics(t, notrack("int 4097; bzero; len; int 4097; =="), 4)
}
func TestBytesConversions(t *testing.T) {
@@ -5153,9 +5151,9 @@ func TestPcDetails(t *testing.T) {
pc int
det string
}{
- {"int 1; int 2; -", 5, "pushint 1\npushint 2\n-\n"},
- {"int 1; err", 3, "pushint 1\nerr\n"},
- {"int 1; dup; int 2; -; +", 6, "dup\npushint 2\n-\n"},
+ {"int 1; int 2; -", 5, "pushint 1; pushint 2; -"},
+ {"int 1; err", 3, "pushint 1; err"},
+ {"int 1; dup; int 2; -; +", 6, "dup; pushint 2; -"},
{"b end; end:", 4, ""},
}
for i, test := range tests {
@@ -5173,7 +5171,7 @@ func TestPcDetails(t *testing.T) {
assert.Equal(t, test.pc, cx.pc, ep.Trace.String())
- pc, det := cx.PcDetails()
+ pc, det := cx.pcDetails()
assert.Equal(t, test.pc, pc)
assert.Equal(t, test.det, det)
})
@@ -5284,13 +5282,13 @@ By Herman Melville`, "",
source := fmt.Sprintf(template, hex.EncodeToString([]byte(tc.decoded)), hex.EncodeToString([]byte(tc.encoded)), tc.alph)
if tc.error == "" {
if LogicVersion < fidoVersion {
- testProg(t, source, AssemblerMaxVersion, Expect{0, "unknown opcode..."})
+ testProg(t, source, AssemblerMaxVersion, exp(0, "unknown opcode..."))
} else {
testAccepts(t, source, fidoVersion)
}
} else {
if LogicVersion < fidoVersion {
- testProg(t, source, AssemblerMaxVersion, Expect{0, "unknown opcode..."})
+ testProg(t, source, AssemblerMaxVersion, exp(0, "unknown opcode..."))
} else {
err := testPanics(t, source, fidoVersion)
require.Error(t, err)
@@ -5407,7 +5405,7 @@ func TestOpJSONRef(t *testing.T) {
ep.SigLedger = ledger
testCases := []struct {
source string
- previousVersErrors []Expect
+ previousVersErrors []expect
}{
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\":3}, \"key5\": 18446744073709551615 }";
@@ -5415,7 +5413,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 0;
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": 3}, \"key5\": 18446744073709551615 }";
@@ -5423,7 +5421,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 18446744073709551615; //max uint64 value
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": 3}, \"key5\": 18446744073709551615 }";
@@ -5431,7 +5429,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "algo";
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"\\u0061\\u006C\\u0067\\u006F\",\"key2\":{\"key3\": \"teal\", \"key4\": 3}, \"key5\": 18446744073709551615 }";
@@ -5439,7 +5437,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "algo";
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5451,7 +5449,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
int 10
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref"), exp(5, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5461,7 +5459,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "teal"
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref"), exp(5, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"\\"teal\\"\", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5471,7 +5469,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte ""teal"" // quotes match
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref"), exp(5, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \" teal \", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5481,7 +5479,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte " teal " // spaces match
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref"), exp(5, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10, \"key40\": \"10\"}}, \"key5\": 18446744073709551615 }";
@@ -5492,7 +5490,7 @@ func TestOpJSONRef(t *testing.T) {
byte "{\"key40\": 10, \"key40\": \"10\"}"
==
`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "{\"rawId\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5500,7 +5498,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONObject;
byte "{\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"}" // object as it appeared in input
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "{\"rawId\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientD\\u0061taJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5508,7 +5506,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONObject;
byte "{\"attestationObject\": \"based64url_encoded_buffer\",\"clientD\\u0061taJSON\": \" based64url_encoded_client_data\"}" // object as it appeared in input
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "{\"rawId\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5518,7 +5516,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte " based64url_encoded_client_data";
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref"), exp(5, "unknown opcode: json_ref")},
},
{
source: `byte "{\"\\u0072\\u0061\\u0077\\u0049\\u0044\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5526,7 +5524,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "responseId"
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
// JavaScript MAX_SAFE_INTEGER
{
@@ -5535,7 +5533,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 9007199254740991;
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
// maximum uint64
{
@@ -5544,7 +5542,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 18446744073709551615;
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
// larger-than-uint64s are allowed if not requested
{
@@ -5553,7 +5551,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 0;
==`,
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
}
@@ -5588,57 +5586,57 @@ func TestOpJSONRef(t *testing.T) {
failedCases := []struct {
source string
error string
- previousVersErrors []Expect
+ previousVersErrors []expect
}{
{
source: `byte "{\"key0\": 1 }"; byte "key0"; json_ref JSONString;`,
error: "json: cannot unmarshal number into Go value of type string",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": [1] }"; byte "key0"; json_ref JSONString;`,
error: "json: cannot unmarshal array into Go value of type string",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": {\"key1\":1} }"; byte "key0"; json_ref JSONString;`,
error: "json: cannot unmarshal object into Go value of type string",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": \"1\" }"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal string into Go value of type uint64",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": [\"1\"] }"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal array into Go value of type uint64",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": {\"key1\":1} }"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal object into Go value of type uint64",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": [1]}"; byte "key0"; json_ref JSONObject;`,
error: "json: cannot unmarshal array into Go value of type map[string]json.RawMessage",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 1}"; byte "key0"; json_ref JSONObject;`,
error: "json: cannot unmarshal number into Go value of type map[string]json.RawMessage",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": \"1\"}"; byte "key0"; json_ref JSONObject;`,
error: "json: cannot unmarshal string into Go value of type map[string]json.RawMessage",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 1,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": [1,2,3]} }"; byte "key3"; json_ref JSONString;`,
error: "key key3 not found in JSON text",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 1,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": [1,2,3]}}";
@@ -5648,52 +5646,52 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString
`,
error: "key key5 not found in JSON text",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref"), exp(5, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": -0,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number -0 into Go value of type uint64",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 1e10,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 1e10 into Go value of type uint64",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 0.2e-2,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 0.2e-2 into Go value of type uint64",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 1.0,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 1.0 into Go value of type uint64",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 1.0,\"key1\": 2.5,\"key2\": -3}"; byte "key1"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 2.5 into Go value of type uint64",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 1.0,\"key1\": 2.5,\"key2\": -3}"; byte "key2"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number -3 into Go value of type uint64",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 18446744073709551616}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 18446744073709551616 into Go value of type uint64",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 1,}"; byte "key0"; json_ref JSONString;`,
error: "error while parsing JSON text, invalid json text",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 1, \"key0\": \"3\"}"; byte "key0"; json_ref JSONString;`,
error: "error while parsing JSON text, invalid json text, duplicate keys not allowed",
- previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(1, "unknown opcode: json_ref")},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10, \"key40\": \"should fail!\"}}}";
@@ -5705,7 +5703,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString
`,
error: "error while parsing JSON text, invalid json text, duplicate keys not allowed",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}, {7, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref"), exp(5, "unknown opcode: json_ref"), exp(7, "unknown opcode: json_ref")},
},
{
source: `byte "[1,2,3]";
@@ -5713,7 +5711,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "2";
@@ -5721,7 +5719,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "null";
@@ -5729,7 +5727,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "true";
@@ -5737,7 +5735,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "\"sometext\"";
@@ -5745,7 +5743,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
{
source: `byte "{noquotes: \"shouldn't work\"}";
@@ -5754,7 +5752,7 @@ func TestOpJSONRef(t *testing.T) {
byte "shouldn't work";
==`,
error: "error while parsing JSON text, invalid json text",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
// max uint64 + 1 should fail
{
@@ -5764,7 +5762,7 @@ func TestOpJSONRef(t *testing.T) {
int 1;
return`,
error: "json: cannot unmarshal number 18446744073709551616 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []expect{exp(3, "unknown opcode: json_ref")},
},
}
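
These failure cases encode json_ref's strictness: the input must be a top-level JSON object, duplicate keys are rejected outright, and JSONUint64 accepts only exact uint64 literals (no signs, fractions, or exponents). A stdlib sketch of the duplicate-key rule for top-level keys (illustrative only, not the production parser):

    // sketch: detect duplicate top-level keys using encoding/json tokens.
    func topLevelDuplicateKeys(text []byte) (bool, error) {
        dec := json.NewDecoder(bytes.NewReader(text))
        tok, err := dec.Token()
        if err != nil {
            return false, err
        }
        if d, ok := tok.(json.Delim); !ok || d != '{' {
            return false, fmt.Errorf("only json object is allowed")
        }
        seen := make(map[string]bool)
        for dec.More() {
            keyTok, err := dec.Token()
            if err != nil {
                return false, err
            }
            key, ok := keyTok.(string)
            if !ok {
                return false, fmt.Errorf("unexpected token %v", keyTok)
            }
            if seen[key] {
                return true, nil
            }
            seen[key] = true
            var value json.RawMessage // skip the value, nested or not
            if err := dec.Decode(&value); err != nil {
                return false, err
            }
        }
        return false, nil
    }
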
@@ -5792,8 +5790,7 @@ func TestOpJSONRef(t *testing.T) {
pass, _, err := EvalContract(ops.Program, 0, 888, ep)
require.False(t, pass)
- require.Error(t, err)
- require.EqualError(t, err, s.error)
+ require.ErrorContains(t, err, s.error)
// reset pooled budget for new "app call"
*ep.PooledApplicationBudget = ep.Proto.MaxAppProgramCost
diff --git a/data/transactions/logic/export_test.go b/data/transactions/logic/export_test.go
index 6ee072aff..1a21d9ff1 100644
--- a/data/transactions/logic/export_test.go
+++ b/data/transactions/logic/export_test.go
@@ -24,10 +24,6 @@ import "github.com/algorand/go-algorand/data/basics"
// we export some extra things to make testing easier there. But we do it in a
// _test.go file, so they are only exported during testing.
-func NewExpect(l int, s string) Expect {
- return Expect{l, s}
-}
-
func (ep *EvalParams) Reset() {
ep.reset()
}
@@ -45,6 +41,7 @@ func (l *Ledger) DelBoxes(app basics.AppIndex, names ...string) {
}
var DefaultEvalParams = defaultEvalParams
+var Exp = exp
var MakeSampleEnv = makeSampleEnv
var MakeSampleEnvWithVersion = makeSampleEnvWithVersion
var MakeSampleTxn = makeSampleTxn
@@ -56,6 +53,7 @@ var TestLogic = testLogic
var TestApp = testApp
var TestAppBytes = testAppBytes
var TestApps = testApps
+var TestLogicRange = testLogicRange
var TestProg = testProg
var WithPanicOpcode = withPanicOpcode
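
With NewExpect gone, external packages use Exp, the re-export of the package-internal exp helper that every testProg call in this diff now uses. Judging purely from its call sites, exp is a thin constructor over the lowercase expect type, along the lines of:

    // presumed shape, inferred from call sites such as exp(1, "bury 0 always fails"):
    func exp(line int, substring string) expect {
        return expect{line, substring}
    }
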
diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go
index 0af2079a3..40660f1f4 100644
--- a/data/transactions/logic/fields.go
+++ b/data/transactions/logic/fields.go
@@ -267,18 +267,18 @@ func (fs txnFieldSpec) Note() string {
}
var txnFieldSpecs = [...]txnFieldSpec{
- {Sender, StackBytes, false, 0, 5, false, "32 byte address"},
+ {Sender, StackAddress, false, 0, 5, false, "32 byte address"},
{Fee, StackUint64, false, 0, 5, false, "microalgos"},
{FirstValid, StackUint64, false, 0, 0, false, "round number"},
{FirstValidTime, StackUint64, false, randomnessVersion, 0, false, "UNIX timestamp of block before txn.FirstValid. Fails if negative"},
{LastValid, StackUint64, false, 0, 0, false, "round number"},
{Note, StackBytes, false, 0, 6, false, "Any data up to 1024 bytes"},
- {Lease, StackBytes, false, 0, 0, false, "32 byte lease value"},
- {Receiver, StackBytes, false, 0, 5, false, "32 byte address"},
+ {Lease, StackBytes32, false, 0, 0, false, "32 byte lease value"},
+ {Receiver, StackAddress, false, 0, 5, false, "32 byte address"},
{Amount, StackUint64, false, 0, 5, false, "microalgos"},
- {CloseRemainderTo, StackBytes, false, 0, 5, false, "32 byte address"},
- {VotePK, StackBytes, false, 0, 6, false, "32 byte address"},
- {SelectionPK, StackBytes, false, 0, 6, false, "32 byte address"},
+ {CloseRemainderTo, StackAddress, false, 0, 5, false, "32 byte address"},
+ {VotePK, StackBytes32, false, 0, 6, false, "32 byte address"},
+ {SelectionPK, StackBytes32, false, 0, 6, false, "32 byte address"},
{VoteFirst, StackUint64, false, 0, 6, false, "The first round that the participation key is valid."},
{VoteLast, StackUint64, false, 0, 6, false, "The last round that the participation key is valid."},
{VoteKeyDilution, StackUint64, false, 0, 6, false, "Dilution for the 2-level participation key"},
@@ -286,42 +286,42 @@ var txnFieldSpecs = [...]txnFieldSpec{
{TypeEnum, StackUint64, false, 0, 5, false, "Transaction type as integer"},
{XferAsset, StackUint64, false, 0, 5, false, "Asset ID"},
{AssetAmount, StackUint64, false, 0, 5, false, "value in Asset's units"},
- {AssetSender, StackBytes, false, 0, 5, false,
+ {AssetSender, StackAddress, false, 0, 5, false,
"32 byte address. Source of assets if Sender is the Asset's Clawback address."},
- {AssetReceiver, StackBytes, false, 0, 5, false, "32 byte address"},
- {AssetCloseTo, StackBytes, false, 0, 5, false, "32 byte address"},
+ {AssetReceiver, StackAddress, false, 0, 5, false, "32 byte address"},
+ {AssetCloseTo, StackAddress, false, 0, 5, false, "32 byte address"},
{GroupIndex, StackUint64, false, 0, 0, false,
"Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1"},
- {TxID, StackBytes, false, 0, 0, false, "The computed ID for this transaction. 32 bytes."},
+ {TxID, StackBytes32, false, 0, 0, false, "The computed ID for this transaction. 32 bytes."},
{ApplicationID, StackUint64, false, 2, 6, false, "ApplicationID from ApplicationCall transaction"},
{OnCompletion, StackUint64, false, 2, 6, false, "ApplicationCall transaction on completion action"},
{ApplicationArgs, StackBytes, true, 2, 6, false,
"Arguments passed to the application in the ApplicationCall transaction"},
{NumAppArgs, StackUint64, false, 2, 0, false, "Number of ApplicationArgs"},
- {Accounts, StackBytes, true, 2, 6, false, "Accounts listed in the ApplicationCall transaction"},
+ {Accounts, StackAddress, true, 2, 6, false, "Accounts listed in the ApplicationCall transaction"},
{NumAccounts, StackUint64, false, 2, 0, false, "Number of Accounts"},
{ApprovalProgram, StackBytes, false, 2, 6, false, "Approval program"},
{ClearStateProgram, StackBytes, false, 2, 6, false, "Clear state program"},
- {RekeyTo, StackBytes, false, 2, 6, false, "32 byte Sender's new AuthAddr"},
+ {RekeyTo, StackAddress, false, 2, 6, false, "32 byte Sender's new AuthAddr"},
{ConfigAsset, StackUint64, false, 2, 5, false, "Asset ID in asset config transaction"},
{ConfigAssetTotal, StackUint64, false, 2, 5, false, "Total number of units of this asset created"},
{ConfigAssetDecimals, StackUint64, false, 2, 5, false,
"Number of digits to display after the decimal place when displaying the asset"},
- {ConfigAssetDefaultFrozen, StackUint64, false, 2, 5, false,
+ {ConfigAssetDefaultFrozen, StackBoolean, false, 2, 5, false,
"Whether the asset's slots are frozen by default or not, 0 or 1"},
{ConfigAssetUnitName, StackBytes, false, 2, 5, false, "Unit name of the asset"},
{ConfigAssetName, StackBytes, false, 2, 5, false, "The asset name"},
{ConfigAssetURL, StackBytes, false, 2, 5, false, "URL"},
- {ConfigAssetMetadataHash, StackBytes, false, 2, 5, false,
+ {ConfigAssetMetadataHash, StackBytes32, false, 2, 5, false,
"32 byte commitment to unspecified asset metadata"},
- {ConfigAssetManager, StackBytes, false, 2, 5, false, "32 byte address"},
- {ConfigAssetReserve, StackBytes, false, 2, 5, false, "32 byte address"},
- {ConfigAssetFreeze, StackBytes, false, 2, 5, false, "32 byte address"},
- {ConfigAssetClawback, StackBytes, false, 2, 5, false, "32 byte address"},
+ {ConfigAssetManager, StackAddress, false, 2, 5, false, "32 byte address"},
+ {ConfigAssetReserve, StackAddress, false, 2, 5, false, "32 byte address"},
+ {ConfigAssetFreeze, StackAddress, false, 2, 5, false, "32 byte address"},
+ {ConfigAssetClawback, StackAddress, false, 2, 5, false, "32 byte address"},
{FreezeAsset, StackUint64, false, 2, 5, false, "Asset ID being frozen or un-frozen"},
- {FreezeAssetAccount, StackBytes, false, 2, 5, false,
+ {FreezeAssetAccount, StackAddress, false, 2, 5, false,
"32 byte address of the account whose asset slot is being frozen or un-frozen"},
- {FreezeAssetFrozen, StackUint64, false, 2, 5, false, "The new frozen value, 0 or 1"},
+ {FreezeAssetFrozen, StackBoolean, false, 2, 5, false, "The new frozen value, 0 or 1"},
{Assets, StackUint64, true, 3, 6, false, "Foreign Assets listed in the ApplicationCall transaction"},
{NumAssets, StackUint64, false, 3, 0, false, "Number of Assets"},
{Applications, StackUint64, true, 3, 6, false, "Foreign Apps listed in the ApplicationCall transaction"},
@@ -332,7 +332,7 @@ var txnFieldSpecs = [...]txnFieldSpec{
{LocalNumByteSlice, StackUint64, false, 3, 6, false, "Number of local state byteslices in ApplicationCall"},
{ExtraProgramPages, StackUint64, false, 4, 6, false,
"Number of additional pages for each of the application's approval and clear state programs. An ExtraProgramPages of 1 means 2048 more total bytes, or 1024 for each program."},
- {Nonparticipation, StackUint64, false, 5, 6, false, "Marks an account nonparticipating for rewards"},
+ {Nonparticipation, StackBoolean, false, 5, 6, false, "Marks an account nonparticipating for rewards"},
// "Effects" Last two things are always going to: 0, true
{Logs, StackBytes, true, 5, 0, true, "Log messages emitted by an application call (only with `itxn` in v5)"},
@@ -568,7 +568,7 @@ var globalFieldSpecs = [...]globalFieldSpec{
{MinTxnFee, StackUint64, modeAny, 0, "microalgos"},
{MinBalance, StackUint64, modeAny, 0, "microalgos"},
{MaxTxnLife, StackUint64, modeAny, 0, "rounds"},
- {ZeroAddress, StackBytes, modeAny, 0, "32 byte address of all zero bytes"},
+ {ZeroAddress, StackAddress, modeAny, 0, "32 byte address of all zero bytes"},
{GroupSize, StackUint64, modeAny, 0,
"Number of transactions in this atomic transaction group. At least 1"},
{LogicSigVersion, StackUint64, modeAny, 2, "Maximum supported version"},
@@ -576,17 +576,17 @@ var globalFieldSpecs = [...]globalFieldSpec{
{LatestTimestamp, StackUint64, ModeApp, 2,
"Last confirmed block UNIX timestamp. Fails if negative"},
{CurrentApplicationID, StackUint64, ModeApp, 2, "ID of current application executing"},
- {CreatorAddress, StackBytes, ModeApp, 3,
+ {CreatorAddress, StackAddress, ModeApp, 3,
"Address of the creator of the current application"},
- {CurrentApplicationAddress, StackBytes, ModeApp, 5,
+ {CurrentApplicationAddress, StackAddress, ModeApp, 5,
"Address that the current application controls"},
- {GroupID, StackBytes, modeAny, 5,
+ {GroupID, StackBytes32, modeAny, 5,
"ID of the transaction group. 32 zero bytes if the transaction is not part of a group."},
{OpcodeBudget, StackUint64, modeAny, 6,
"The remaining cost that can be spent by opcodes in this program."},
{CallerApplicationID, StackUint64, ModeApp, 6,
"The application ID of the application that called this application. 0 if this application is at the top-level."},
- {CallerApplicationAddress, StackBytes, ModeApp, 6,
+ {CallerApplicationAddress, StackAddress, ModeApp, 6,
"The application address of the application that called this application. ZeroAddress if this application is at the top-level."},
}
@@ -983,7 +983,7 @@ func (fs assetHoldingFieldSpec) Note() string {
var assetHoldingFieldSpecs = [...]assetHoldingFieldSpec{
{AssetBalance, StackUint64, 2, "Amount of the asset unit held by this account"},
- {AssetFrozen, StackUint64, 2, "Is the asset frozen or not"},
+ {AssetFrozen, StackBoolean, 2, "Is the asset frozen or not"},
}
func assetHoldingFieldSpecByField(f AssetHoldingField) (assetHoldingFieldSpec, bool) {
@@ -1070,16 +1070,16 @@ func (fs assetParamsFieldSpec) Note() string {
var assetParamsFieldSpecs = [...]assetParamsFieldSpec{
{AssetTotal, StackUint64, 2, "Total number of units of this asset"},
{AssetDecimals, StackUint64, 2, "See AssetParams.Decimals"},
- {AssetDefaultFrozen, StackUint64, 2, "Frozen by default or not"},
+ {AssetDefaultFrozen, StackBoolean, 2, "Frozen by default or not"},
{AssetUnitName, StackBytes, 2, "Asset unit name"},
{AssetName, StackBytes, 2, "Asset name"},
{AssetURL, StackBytes, 2, "URL with additional info about the asset"},
- {AssetMetadataHash, StackBytes, 2, "Arbitrary commitment"},
- {AssetManager, StackBytes, 2, "Manager address"},
- {AssetReserve, StackBytes, 2, "Reserve address"},
- {AssetFreeze, StackBytes, 2, "Freeze address"},
- {AssetClawback, StackBytes, 2, "Clawback address"},
- {AssetCreator, StackBytes, 5, "Creator address"},
+ {AssetMetadataHash, StackBytes32, 2, "Arbitrary commitment"},
+ {AssetManager, StackAddress, 2, "Manager address"},
+ {AssetReserve, StackAddress, 2, "Reserve address"},
+ {AssetFreeze, StackAddress, 2, "Freeze address"},
+ {AssetClawback, StackAddress, 2, "Clawback address"},
+ {AssetCreator, StackAddress, 5, "Creator address"},
}
func assetParamsFieldSpecByField(f AssetParamsField) (assetParamsFieldSpec, bool) {
@@ -1166,8 +1166,8 @@ var appParamsFieldSpecs = [...]appParamsFieldSpec{
{AppLocalNumUint, StackUint64, 5, "Number of uint64 values allowed in Local State"},
{AppLocalNumByteSlice, StackUint64, 5, "Number of byte array values allowed in Local State"},
{AppExtraProgramPages, StackUint64, 5, "Number of Extra Program Pages of code space"},
- {AppCreator, StackBytes, 5, "Creator address"},
- {AppAddress, StackBytes, 5, "Address for which this application has authority"},
+ {AppCreator, StackAddress, 5, "Creator address"},
+ {AppAddress, StackAddress, 5, "Address for which this application has authority"},
}
func appParamsFieldSpecByField(f AppParamsField) (appParamsFieldSpec, bool) {
@@ -1259,7 +1259,7 @@ func (fs acctParamsFieldSpec) Note() string {
var acctParamsFieldSpecs = [...]acctParamsFieldSpec{
{AcctBalance, StackUint64, 6, "Account balance in microalgos"},
{AcctMinBalance, StackUint64, 6, "Minimum required balance for account, in microalgos"},
- {AcctAuthAddr, StackBytes, 6, "Address the account is rekeyed to."},
+ {AcctAuthAddr, StackAddress, 6, "Address the account is rekeyed to."},
{AcctTotalNumUint, StackUint64, 8, "The total number of uint64 values allocated by this account in Global and Local States."},
{AcctTotalNumByteSlice, StackUint64, 8, "The total number of byte array values allocated by this account in Global and Local States."},
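
These spec rewrites narrow field types from the catch-all StackBytes/StackUint64 to named, bounded types that the new NamedTypes block in langspec.json (below) documents: StackAddress and StackBytes32 are exactly-32-byte byte types, StackBoolean is a uint64 constrained to 0 or 1, and each carries an AVMType saying how the value actually lives on the stack. A sketch of such a bounded type, with field names taken from the langspec entries and the concrete Go layout assumed; the two example values are copied from the JSON below:

    // sketch: a named stack type as langspec.json describes it. Bound is a
    // length range for byte types and a value range for integer types.
    type namedType struct {
        Name         string
        Abbreviation string
        Bound        [2]uint64
        AVMType      string // "uint64" or "[]byte"
    }

    var (
        uint64Type = namedType{"uint64", "i", [2]uint64{0, math.MaxUint64}, "uint64"}
        stateKey   = namedType{"stateKey", "K", [2]uint64{0, 64}, "[]byte"}
    )
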
diff --git a/data/transactions/logic/fields_test.go b/data/transactions/logic/fields_test.go
index cbe425d5f..bedb07907 100644
--- a/data/transactions/logic/fields_test.go
+++ b/data/transactions/logic/fields_test.go
@@ -219,21 +219,20 @@ func TestAssetParamsFieldsVersions(t *testing.T) {
for _, field := range fields {
// Need to use intc so we can "backversion" the
// program and not have it fail because of pushint.
- text := fmt.Sprintf("intcblock 0 1; intc_0; asset_params_get %s; bnz ok; err; ok: ", field.field.String())
- switch field.ftype {
- case StackUint64: // ensure the return type is uint64 by adding
+ text := fmt.Sprintf("intcblock 55 1; intc_0; asset_params_get %s; bnz ok; err; ok: ", field.field.String())
+ switch field.ftype.AVMType {
+ case avmUint64: // ensure the return type is uint64 by adding
text += " intc_1; +"
- case StackBytes: // ensure the return type is bytes by using len
+ case avmBytes: // ensure the return type is bytes by using len
text += " len" // also happens to ensure that we get non empty - the params fields are fixed width
}
// check assembler fails if version before introduction
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
ep, txn, ledger := makeSampleEnv()
- // Create app 55, since txn.ForeignApps[0] == 55
ledger.NewAsset(txn.Sender, 55, basics.AssetParams{})
ep.Proto.LogicSigVersion = v
if field.version > v {
- testProg(t, text, v, Expect{1, "...was introduced in..."})
+ testProg(t, text, v, exp(1, "...was introduced in..."))
ops := testProg(t, text, field.version) // assemble in the future
ops.Program[0] = byte(v)
testAppBytes(t, ops.Program, ep, "invalid asset_params_get field")
@@ -270,26 +269,20 @@ func TestAcctParamsFieldsVersions(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- var fields []acctParamsFieldSpec
- for _, fs := range acctParamsFieldSpecs {
- if fs.version > 6 {
- fields = append(fields, fs)
+ for _, field := range acctParamsFieldSpecs {
+ text := fmt.Sprintf("txn Sender; acct_params_get %s; assert;", field.field.String())
+ if field.ftype.AVMType == avmBytes {
+ text += "global ZeroAddress; concat; len" // use concat to prove we have bytes
+ } else {
+ text += "global ZeroAddress; len; +" // use + to prove we have an int
}
- }
- require.Greater(t, len(fields), 0)
- for _, field := range fields {
- // Need to use intc so we can "backversion" the program and not have it
- // fail because of pushint.
- // Use of '+' confirms the type, which is uint64 for all fields
- text := fmt.Sprintf("intcblock 0 1; intc_0; acct_params_get %s; assert; intc_1; +", field.field.String())
- // check assembler fails if version before introduction
- for v := uint64(2); v <= AssemblerMaxVersion; v++ {
- ep, txn, ledger := makeSampleEnv()
+ testLogicRange(t, 4, 0, func(t *testing.T, ep *EvalParams, txn *transactions.Transaction, ledger *Ledger) {
+ v := ep.Proto.LogicSigVersion
ledger.NewAccount(txn.Sender, 200_000)
- ep.Proto.LogicSigVersion = v
if field.version > v {
- testProg(t, text, v, Expect{1, "...was introduced in..."})
+ // check assembler fails if version before introduction
+ testProg(t, text, v, exp(1, "...was introduced in..."))
ops := testProg(t, text, field.version) // assemble in the future
ops.Program[0] = byte(v) // but set version back to before intro
if v < 6 {
@@ -301,7 +294,7 @@ func TestAcctParamsFieldsVersions(t *testing.T) {
testProg(t, text, v)
testApp(t, text, ep)
}
- }
+ })
}
}
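
The concat/len and len/+ probes replace the old intcblock trick: appending "global ZeroAddress; concat; len" to the program only assembles when the fetched value is bytes, and "global ZeroAddress; len; +" only assembles when it is a uint64, so the assembler's own type tracking verifies each field's declared type. Building the probe programmatically might look like:

    // sketch: append a type probe; assembly fails if the top-of-stack value
    // has the wrong AVM type for the chosen probe.
    func withTypeProbe(text string, isBytes bool) string {
        if isBytes {
            return text + "global ZeroAddress; concat; len" // concat proves bytes
        }
        return text + "global ZeroAddress; len; +" // + proves uint64
    }
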
diff --git a/data/transactions/logic/frames_test.go b/data/transactions/logic/frames_test.go
index c46a8dd84..63ff1c21c 100644
--- a/data/transactions/logic/frames_test.go
+++ b/data/transactions/logic/frames_test.go
@@ -50,7 +50,7 @@ func TestDupPopN(t *testing.T) {
testAccepts(t, "int 1; int 1; int 1; popn 2", fpVersion)
testAccepts(t, "int 1; int 0; popn 1", fpVersion)
testPanics(t, "int 1; int 0; popn 2", fpVersion)
- testProg(t, "int 1; int 0; popn 3", LogicVersion, Expect{1, "popn 3 expects 3..."})
+ testProg(t, "int 1; int 0; popn 3", LogicVersion, exp(1, "popn 3 expects 3..."))
testPanics(t, notrack("int 1; int 0; popn 3"), fpVersion)
testAccepts(t, `int 7; dupn 250; dupn 250; dupn 250; dupn 249;
@@ -69,9 +69,9 @@ func TestDupPopNTyping(t *testing.T) {
t.Parallel()
testProg(t, "int 8; dupn 2; +; pop", LogicVersion)
- testProg(t, "int 8; dupn 2; concat; pop", LogicVersion, Expect{1, "...wanted type []byte..."})
+ testProg(t, "int 8; dupn 2; concat; pop", LogicVersion, exp(1, "...wanted type []byte..."))
- testProg(t, "popn 1", LogicVersion, Expect{1, "...expects 1 stack argument..."})
+ testProg(t, "popn 1", LogicVersion, exp(1, "...expects 1 stack argument..."))
}
func TestSimpleFrame(t *testing.T) {
@@ -342,7 +342,7 @@ func TestFrameAccess(t *testing.T) {
int 1
return
`
- testProg(t, source, fpVersion, Expect{4, "frame_dig above stack"})
+ testProg(t, source, fpVersion, exp(4, "frame_dig above stack"))
testPanics(t, notrack(source), fpVersion, "frame_dig above stack")
source = `
@@ -357,7 +357,7 @@ func TestFrameAccess(t *testing.T) {
int 1
return
`
- testProg(t, source, fpVersion, Expect{5, "frame_dig above stack"})
+ testProg(t, source, fpVersion, exp(5, "frame_dig above stack"))
testPanics(t, notrack(source), fpVersion, "frame_dig above stack")
// Note that at the moment of frame_bury, the stack IS big enough, because
@@ -376,7 +376,7 @@ func TestFrameAccess(t *testing.T) {
int 1
return
`
- testProg(t, source, fpVersion, Expect{6, "frame_bury above stack"})
+ testProg(t, source, fpVersion, exp(6, "frame_bury above stack"))
testPanics(t, notrack(source), fpVersion, "frame_bury above stack")
}
@@ -400,7 +400,7 @@ main:
pop // argument popped
frame_dig -1 // but then frame_dig used to get at it
`
- testProg(t, source, fpVersion, Expect{7, "frame_dig above stack"})
+ testProg(t, source, fpVersion, exp(7, "frame_dig above stack"))
testPanics(t, notrack(source), fpVersion, "frame_dig above stack")
testAccepts(t, `
@@ -427,7 +427,7 @@ main:
frame_bury 1;
retsub
`
- testProg(t, source, fpVersion, Expect{8, "frame_dig above stack"})
+ testProg(t, source, fpVersion, exp(8, "frame_dig above stack"))
testPanics(t, notrack(source), fpVersion)
}
@@ -442,7 +442,7 @@ main:
proto 1 1
frame_dig -10 // digging down below arguments
`
- testProg(t, source, fpVersion, Expect{6, "frame_dig -10 in sub with 1 arg..."})
+ testProg(t, source, fpVersion, exp(6, "frame_dig -10 in sub with 1 arg..."))
testPanics(t, notrack(source), fpVersion, "frame_dig -10 in sub with 1 arg")
testPanics(t, `
@@ -459,7 +459,7 @@ main:
proto 1 15
frame_bury -10 // burying down below arguments
`
- testProg(t, source, fpVersion, Expect{6, "frame_bury -10 in sub with 1 arg..."})
+ testProg(t, source, fpVersion, exp(6, "frame_bury -10 in sub with 1 arg..."))
testPanics(t, notrack(source), fpVersion, "frame_bury -10 in sub with 1 arg")
// Without `proto`, frame_bury can't be checked by assembler, but still panics
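(For contrast with the failure cases above: with `proto 1 1` the subroutine's single argument sits at frame slot -1, the deepest legal index, so a well-formed dig of it both assembles and runs. A sketch in the same style as these tests:

	source = `
b main
copyarg:
	proto 1 1
	frame_dig -1 // with one argument, -1 is the deepest legal slot
	retsub
main:
	int 42
	callsub copyarg
	int 42
	==
`
	testAccepts(t, source, fpVersion)
)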
diff --git a/data/transactions/logic/langspec.json b/data/transactions/logic/langspec.json
index cbfb4b0d9..1892aa42b 100644
--- a/data/transactions/logic/langspec.json
+++ b/data/transactions/logic/langspec.json
@@ -1,6 +1,107 @@
{
- "EvalMaxVersion": 8,
- "LogicSigVersion": 8,
+ "EvalMaxVersion": 9,
+ "LogicSigVersion": 9,
+ "NamedTypes": [
+ {
+ "Name": "uint64",
+ "Abbreviation": "i",
+ "Bound": [
+ 0,
+ 18446744073709551615
+ ],
+ "AVMType": "uint64"
+ },
+ {
+ "Name": "stateKey",
+ "Abbreviation": "K",
+ "Bound": [
+ 0,
+ 64
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "none",
+ "Abbreviation": "x",
+ "Bound": [
+ 0,
+ 0
+ ],
+ "AVMType": "none"
+ },
+ {
+ "Name": "method",
+ "Abbreviation": "M",
+ "Bound": [
+ 4,
+ 4
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "boxName",
+ "Abbreviation": "N",
+ "Bound": [
+ 1,
+ 64
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "bool",
+ "Abbreviation": "T",
+ "Bound": [
+ 0,
+ 1
+ ],
+ "AVMType": "uint64"
+ },
+ {
+ "Name": "bigint",
+ "Abbreviation": "I",
+ "Bound": [
+ 0,
+ 64
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "any",
+ "Abbreviation": "a",
+ "Bound": [
+ 0,
+ 0
+ ],
+ "AVMType": "any"
+ },
+ {
+ "Name": "address",
+ "Abbreviation": "A",
+ "Bound": [
+ 32,
+ 32
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[]byte",
+ "Abbreviation": "b",
+ "Bound": [
+ 0,
+ 4096
+ ],
+ "AVMType": "[]byte"
+ },
+ {
+ "Name": "[32]byte",
+ "Abbreviation": "H",
+ "Bound": [
+ 32,
+ 32
+ ],
+ "AVMType": "[]byte"
+ }
+ ],
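(The new NamedTypes table maps each named type to its AVM representation and bounds — a value range for uint64-backed types, a byte-length range for []byte-backed ones. A sketch of a consumer-side decode; field names come straight from the JSON above, while the struct names are illustrative, not the project's:

	package main

	import (
		"encoding/json"
		"fmt"
		"log"
		"os"
	)

	type namedType struct {
		Name         string
		Abbreviation string
		Bound        [2]uint64
		AVMType      string
	}

	type langSpec struct {
		EvalMaxVersion  int
		LogicSigVersion int
		NamedTypes      []namedType
	}

	func main() {
		data, err := os.ReadFile("langspec.json")
		if err != nil {
			log.Fatal(err)
		}
		var spec langSpec
		if err := json.Unmarshal(data, &spec); err != nil {
			log.Fatal(err)
		}
		for _, nt := range spec.NamedTypes {
			fmt.Printf("%-8s (%s) -> %s, bound [%d, %d]\n",
				nt.Name, nt.Abbreviation, nt.AVMType, nt.Bound[0], nt.Bound[1])
		}
	}
)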
"Ops": [
{
"Opcode": 0,
@@ -15,8 +116,12 @@
{
"Opcode": 1,
"Name": "sha256",
- "Args": "B",
- "Returns": "B",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "[32]byte"
+ ],
"Size": 1,
"Doc": "SHA256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
@@ -27,8 +132,12 @@
{
"Opcode": 2,
"Name": "keccak256",
- "Args": "B",
- "Returns": "B",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "[32]byte"
+ ],
"Size": 1,
"Doc": "Keccak256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
@@ -39,8 +148,12 @@
{
"Opcode": 3,
"Name": "sha512_256",
- "Args": "B",
- "Returns": "B",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "[32]byte"
+ ],
"Size": 1,
"Doc": "SHA512_256 hash of value A, yields [32]byte",
"IntroducedVersion": 1,
@@ -51,8 +164,14 @@
{
"Opcode": 4,
"Name": "ed25519verify",
- "Args": "BBB",
- "Returns": "U",
+ "Args": [
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "for (data A, signature B, pubkey C) verify the signature of (\"ProgData\" || program_hash || data) against the pubkey =\u003e {0 or 1}",
"DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.",
@@ -64,8 +183,16 @@
{
"Opcode": 5,
"Name": "ecdsa_verify",
- "Args": "BBBBB",
- "Returns": "U",
+ "Args": [
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 2,
"ArgEnum": [
"Secp256k1",
@@ -73,7 +200,14 @@
],
"Doc": "for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey =\u003e {0 or 1}",
"DocExtra": "The 32 byte Y-component of a public key is the last element on the stack, preceded by X-component of a pubkey, preceded by S and R components of a signature, preceded by the data that is fifth element on the stack. All values are big-endian encoded. The signed data must be 32 bytes long, and signatures in lower-S form are only accepted.",
- "ImmediateNote": "{uint8 curve index}",
+ "ImmediateNote": [
+ {
+ "Comment": "curve index",
+ "Encoding": "uint8",
+ "Name": "V",
+ "Reference": "ECDSA"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Arithmetic"
@@ -82,8 +216,13 @@
{
"Opcode": 6,
"Name": "ecdsa_pk_decompress",
- "Args": "B",
- "Returns": "BB",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte",
+ "[]byte"
+ ],
"Size": 2,
"ArgEnum": [
"Secp256k1",
@@ -91,7 +230,14 @@
],
"Doc": "decompress pubkey A into components X, Y",
"DocExtra": "The 33 byte public key in a compressed form to be decompressed into X and Y (top) components. All values are big-endian encoded.",
- "ImmediateNote": "{uint8 curve index}",
+ "ImmediateNote": [
+ {
+ "Comment": "curve index",
+ "Encoding": "uint8",
+ "Name": "V",
+ "Reference": "ECDSA"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Arithmetic"
@@ -100,8 +246,16 @@
{
"Opcode": 7,
"Name": "ecdsa_pk_recover",
- "Args": "BUBB",
- "Returns": "BB",
+ "Args": [
+ "[]byte",
+ "uint64",
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte",
+ "[]byte"
+ ],
"Size": 2,
"ArgEnum": [
"Secp256k1",
@@ -109,7 +263,14 @@
],
"Doc": "for (data A, recovery id B, signature C, D) recover a public key",
"DocExtra": "S (top) and R elements of a signature, recovery id and data (bottom) are expected on the stack and used to deriver a public key. All values are big-endian encoded. The signed data must be 32 bytes long.",
- "ImmediateNote": "{uint8 curve index}",
+ "ImmediateNote": [
+ {
+ "Comment": "curve index",
+ "Encoding": "uint8",
+ "Name": "V",
+ "Reference": "ECDSA"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Arithmetic"
@@ -118,8 +279,13 @@
{
"Opcode": 8,
"Name": "+",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A plus B. Fail on overflow.",
"DocExtra": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`.",
@@ -131,8 +297,13 @@
{
"Opcode": 9,
"Name": "-",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A minus B. Fail if B \u003e A.",
"IntroducedVersion": 1,
@@ -143,8 +314,13 @@
{
"Opcode": 10,
"Name": "/",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A divided by B (truncated division). Fail if B == 0.",
"DocExtra": "`divmodw` is available to divide the two-element values produced by `mulw` and `addw`.",
@@ -156,8 +332,13 @@
{
"Opcode": 11,
"Name": "*",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A times B. Fail on overflow.",
"DocExtra": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`.",
@@ -169,8 +350,13 @@
{
"Opcode": 12,
"Name": "\u003c",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "A less than B =\u003e {0 or 1}",
"IntroducedVersion": 1,
@@ -181,8 +367,13 @@
{
"Opcode": 13,
"Name": "\u003e",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "A greater than B =\u003e {0 or 1}",
"IntroducedVersion": 1,
@@ -193,8 +384,13 @@
{
"Opcode": 14,
"Name": "\u003c=",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "A less than or equal to B =\u003e {0 or 1}",
"IntroducedVersion": 1,
@@ -205,8 +401,13 @@
{
"Opcode": 15,
"Name": "\u003e=",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "A greater than or equal to B =\u003e {0 or 1}",
"IntroducedVersion": 1,
@@ -217,8 +418,13 @@
{
"Opcode": 16,
"Name": "\u0026\u0026",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "A is not zero and B is not zero =\u003e {0 or 1}",
"IntroducedVersion": 1,
@@ -229,8 +435,13 @@
{
"Opcode": 17,
"Name": "||",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "A is not zero or B is not zero =\u003e {0 or 1}",
"IntroducedVersion": 1,
@@ -241,8 +452,13 @@
{
"Opcode": 18,
"Name": "==",
- "Args": "..",
- "Returns": "U",
+ "Args": [
+ "any",
+ "any"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "A is equal to B =\u003e {0 or 1}",
"IntroducedVersion": 1,
@@ -253,8 +469,13 @@
{
"Opcode": 19,
"Name": "!=",
- "Args": "..",
- "Returns": "U",
+ "Args": [
+ "any",
+ "any"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "A is not equal to B =\u003e {0 or 1}",
"IntroducedVersion": 1,
@@ -265,8 +486,12 @@
{
"Opcode": 20,
"Name": "!",
- "Args": "U",
- "Returns": "U",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A == 0 yields 1; else 0",
"IntroducedVersion": 1,
@@ -277,8 +502,12 @@
{
"Opcode": 21,
"Name": "len",
- "Args": "B",
- "Returns": "U",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "yields length of byte value A",
"IntroducedVersion": 1,
@@ -289,8 +518,12 @@
{
"Opcode": 22,
"Name": "itob",
- "Args": "U",
- "Returns": "B",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "converts uint64 A to big-endian byte array, always of length 8",
"IntroducedVersion": 1,
@@ -301,8 +534,12 @@
{
"Opcode": 23,
"Name": "btoi",
- "Args": "B",
- "Returns": "U",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "converts big-endian byte array A to uint64. Fails if len(A) \u003e 8. Padded by leading 0s if len(A) \u003c 8.",
"DocExtra": "`btoi` fails if the input is longer than 8 bytes.",
@@ -314,8 +551,13 @@
{
"Opcode": 24,
"Name": "%",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A modulo B. Fail if B == 0.",
"IntroducedVersion": 1,
@@ -326,8 +568,13 @@
{
"Opcode": 25,
"Name": "|",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A bitwise-or B",
"IntroducedVersion": 1,
@@ -338,8 +585,13 @@
{
"Opcode": 26,
"Name": "\u0026",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A bitwise-and B",
"IntroducedVersion": 1,
@@ -350,8 +602,13 @@
{
"Opcode": 27,
"Name": "^",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A bitwise-xor B",
"IntroducedVersion": 1,
@@ -362,8 +619,12 @@
{
"Opcode": 28,
"Name": "~",
- "Args": "U",
- "Returns": "U",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "bitwise invert value A",
"IntroducedVersion": 1,
@@ -374,8 +635,14 @@
{
"Opcode": 29,
"Name": "mulw",
- "Args": "UU",
- "Returns": "UU",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64",
+ "uint64"
+ ],
"Size": 1,
"Doc": "A times B as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low",
"IntroducedVersion": 1,
@@ -386,8 +653,14 @@
{
"Opcode": 30,
"Name": "addw",
- "Args": "UU",
- "Returns": "UU",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64",
+ "uint64"
+ ],
"Size": 1,
"Doc": "A plus B as a 128-bit result. X is the carry-bit, Y is the low-order 64 bits.",
"IntroducedVersion": 2,
@@ -398,8 +671,18 @@
{
"Opcode": 31,
"Name": "divmodw",
- "Args": "UUUU",
- "Returns": "UUUU",
+ "Args": [
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64"
+ ],
"Size": 1,
"Doc": "W,X = (A,B / C,D); Y,Z = (A,B modulo C,D)",
"DocExtra": "The notation J,K indicates that two uint64 values J and K are interpreted as a uint128 value, with J as the high uint64 and K the low.",
@@ -414,7 +697,13 @@
"Size": 0,
"Doc": "prepare block of uint64 constants for use by intc",
"DocExtra": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.",
- "ImmediateNote": "{varuint count} [{varuint value}, ...]",
+ "ImmediateNote": [
+ {
+ "Comment": "a block of int constant values",
+ "Encoding": "varuint count, [varuint ...]",
+ "Name": "UINT ..."
+ }
+ ],
"IntroducedVersion": 1,
"Groups": [
"Loading Values"
@@ -423,10 +712,18 @@
{
"Opcode": 33,
"Name": "intc",
- "Returns": "U",
+ "Returns": [
+ "uint64"
+ ],
"Size": 2,
"Doc": "Ith constant from intcblock",
- "ImmediateNote": "{uint8 int constant index}",
+ "ImmediateNote": [
+ {
+ "Comment": "an index in the intcblock",
+ "Encoding": "uint8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 1,
"Groups": [
"Loading Values"
@@ -435,7 +732,9 @@
{
"Opcode": 34,
"Name": "intc_0",
- "Returns": "U",
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "constant 0 from intcblock",
"IntroducedVersion": 1,
@@ -446,7 +745,9 @@
{
"Opcode": 35,
"Name": "intc_1",
- "Returns": "U",
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "constant 1 from intcblock",
"IntroducedVersion": 1,
@@ -457,7 +758,9 @@
{
"Opcode": 36,
"Name": "intc_2",
- "Returns": "U",
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "constant 2 from intcblock",
"IntroducedVersion": 1,
@@ -468,7 +771,9 @@
{
"Opcode": 37,
"Name": "intc_3",
- "Returns": "U",
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "constant 3 from intcblock",
"IntroducedVersion": 1,
@@ -482,7 +787,13 @@
"Size": 0,
"Doc": "prepare block of byte-array constants for use by bytec",
"DocExtra": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.",
- "ImmediateNote": "{varuint count} [({varuint length} bytes), ...]",
+ "ImmediateNote": [
+ {
+ "Comment": "a block of byte constant values",
+ "Encoding": "varuint count, [varuint length, bytes ...]",
+ "Name": "BYTES ..."
+ }
+ ],
"IntroducedVersion": 1,
"Groups": [
"Loading Values"
@@ -491,10 +802,18 @@
{
"Opcode": 39,
"Name": "bytec",
- "Returns": "B",
+ "Returns": [
+ "[]byte"
+ ],
"Size": 2,
"Doc": "Ith constant from bytecblock",
- "ImmediateNote": "{uint8 byte constant index}",
+ "ImmediateNote": [
+ {
+ "Comment": "an index in the bytecblock",
+ "Encoding": "uint8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 1,
"Groups": [
"Loading Values"
@@ -503,7 +822,9 @@
{
"Opcode": 40,
"Name": "bytec_0",
- "Returns": "B",
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "constant 0 from bytecblock",
"IntroducedVersion": 1,
@@ -514,7 +835,9 @@
{
"Opcode": 41,
"Name": "bytec_1",
- "Returns": "B",
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "constant 1 from bytecblock",
"IntroducedVersion": 1,
@@ -525,7 +848,9 @@
{
"Opcode": 42,
"Name": "bytec_2",
- "Returns": "B",
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "constant 2 from bytecblock",
"IntroducedVersion": 1,
@@ -536,7 +861,9 @@
{
"Opcode": 43,
"Name": "bytec_3",
- "Returns": "B",
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "constant 3 from bytecblock",
"IntroducedVersion": 1,
@@ -547,10 +874,18 @@
{
"Opcode": 44,
"Name": "arg",
- "Returns": "B",
+ "Returns": [
+ "[]byte"
+ ],
"Size": 2,
"Doc": "Nth LogicSig argument",
- "ImmediateNote": "{uint8 arg index}",
+ "ImmediateNote": [
+ {
+ "Comment": "an arg index",
+ "Encoding": "uint8",
+ "Name": "N"
+ }
+ ],
"IntroducedVersion": 1,
"Groups": [
"Loading Values"
@@ -559,7 +894,9 @@
{
"Opcode": 45,
"Name": "arg_0",
- "Returns": "B",
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "LogicSig argument 0",
"IntroducedVersion": 1,
@@ -570,7 +907,9 @@
{
"Opcode": 46,
"Name": "arg_1",
- "Returns": "B",
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "LogicSig argument 1",
"IntroducedVersion": 1,
@@ -581,7 +920,9 @@
{
"Opcode": 47,
"Name": "arg_2",
- "Returns": "B",
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "LogicSig argument 2",
"IntroducedVersion": 1,
@@ -592,7 +933,9 @@
{
"Opcode": 48,
"Name": "arg_3",
- "Returns": "B",
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "LogicSig argument 3",
"IntroducedVersion": 1,
@@ -603,7 +946,9 @@
{
"Opcode": 49,
"Name": "txn",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"ArgEnum": [
"Sender",
@@ -675,9 +1020,85 @@
"ClearStateProgramPages",
"NumClearStateProgramPages"
],
- "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBBBUBU",
+ "ArgEnumTypes": [
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "uint64",
+ "address",
+ "[32]byte",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "uint64",
+ "address",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "address",
+ "bool",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "uint64",
+ "[]byte",
+ "uint64"
+ ],
"Doc": "field F of current transaction",
- "ImmediateNote": "{uint8 transaction field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txn"
+ }
+ ],
"IntroducedVersion": 1,
"Groups": [
"Loading Values"
@@ -686,7 +1107,9 @@
{
"Opcode": 50,
"Name": "global",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"ArgEnum": [
"MinTxnFee",
@@ -705,9 +1128,32 @@
"CallerApplicationID",
"CallerApplicationAddress"
],
- "ArgEnumTypes": "UUUBUUUUUBBBUUB",
+ "ArgEnumTypes": [
+ "uint64",
+ "uint64",
+ "uint64",
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "address",
+ "address",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "address"
+ ],
"Doc": "global field F",
- "ImmediateNote": "{uint8 global field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "a global field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "global"
+ }
+ ],
"IntroducedVersion": 1,
"Groups": [
"Loading Values"
@@ -716,7 +1162,9 @@
{
"Opcode": 51,
"Name": "gtxn",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 3,
"ArgEnum": [
"Sender",
@@ -788,10 +1236,91 @@
"ClearStateProgramPages",
"NumClearStateProgramPages"
],
- "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBBBUBU",
+ "ArgEnumTypes": [
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "uint64",
+ "address",
+ "[32]byte",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "uint64",
+ "address",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "address",
+ "bool",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "uint64",
+ "[]byte",
+ "uint64"
+ ],
"Doc": "field F of the Tth transaction in the current group",
"DocExtra": "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.",
- "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction group index",
+ "Encoding": "uint8",
+ "Name": "T"
+ },
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txn"
+ }
+ ],
"IntroducedVersion": 1,
"Groups": [
"Loading Values"
@@ -800,10 +1329,18 @@
{
"Opcode": 52,
"Name": "load",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"Doc": "Ith scratch space value. All scratch spaces are 0 at program start.",
- "ImmediateNote": "{uint8 position in scratch space to load from}",
+ "ImmediateNote": [
+ {
+ "Comment": "position in scratch space to load from",
+ "Encoding": "uint8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 1,
"Groups": [
"Loading Values"
@@ -812,10 +1349,18 @@
{
"Opcode": 53,
"Name": "store",
- "Args": ".",
+ "Args": [
+ "any"
+ ],
"Size": 2,
"Doc": "store A to the Ith scratch space",
- "ImmediateNote": "{uint8 position in scratch space to store to}",
+ "ImmediateNote": [
+ {
+ "Comment": "position in scratch space to store to",
+ "Encoding": "uint8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 1,
"Groups": [
"Loading Values"
@@ -824,7 +1369,9 @@
{
"Opcode": 54,
"Name": "txna",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 3,
"ArgEnum": [
"ApplicationArgs",
@@ -835,9 +1382,29 @@
"ApprovalProgramPages",
"ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUBBB",
+ "ArgEnumTypes": [
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
"Doc": "Ith value of the array field F of the current transaction\n`txna` can be called using `txn` with 2 immediates.",
- "ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txna"
+ },
+ {
+ "Comment": "transaction field array index",
+ "Encoding": "uint8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 2,
"Groups": [
"Loading Values"
@@ -846,7 +1413,9 @@
{
"Opcode": 55,
"Name": "gtxna",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 4,
"ArgEnum": [
"ApplicationArgs",
@@ -857,9 +1426,34 @@
"ApprovalProgramPages",
"ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUBBB",
+ "ArgEnumTypes": [
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
"Doc": "Ith value of the array field F from the Tth transaction in the current group\n`gtxna` can be called using `gtxn` with 3 immediates.",
- "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction group index",
+ "Encoding": "uint8",
+ "Name": "T"
+ },
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txna"
+ },
+ {
+ "Comment": "transaction field array index",
+ "Encoding": "uint8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 2,
"Groups": [
"Loading Values"
@@ -868,8 +1462,12 @@
{
"Opcode": 56,
"Name": "gtxns",
- "Args": "U",
- "Returns": ".",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"ArgEnum": [
"Sender",
@@ -941,10 +1539,86 @@
"ClearStateProgramPages",
"NumClearStateProgramPages"
],
- "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBBBUBU",
+ "ArgEnumTypes": [
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "uint64",
+ "address",
+ "[32]byte",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "uint64",
+ "address",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "address",
+ "bool",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "uint64",
+ "[]byte",
+ "uint64"
+ ],
"Doc": "field F of the Ath transaction in the current group",
"DocExtra": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.",
- "ImmediateNote": "{uint8 transaction field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txn"
+ }
+ ],
"IntroducedVersion": 3,
"Groups": [
"Loading Values"
@@ -953,8 +1627,12 @@
{
"Opcode": 57,
"Name": "gtxnsa",
- "Args": "U",
- "Returns": ".",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 3,
"ArgEnum": [
"ApplicationArgs",
@@ -965,9 +1643,29 @@
"ApprovalProgramPages",
"ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUBBB",
+ "ArgEnumTypes": [
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
"Doc": "Ith value of the array field F from the Ath transaction in the current group\n`gtxnsa` can be called using `gtxns` with 2 immediates.",
- "ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txna"
+ },
+ {
+ "Comment": "transaction field array index",
+ "Encoding": "uint8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 3,
"Groups": [
"Loading Values"
@@ -976,11 +1674,24 @@
{
"Opcode": 58,
"Name": "gload",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 3,
"Doc": "Ith scratch space value of the Tth transaction in the current group",
"DocExtra": "`gload` fails unless the requested transaction is an ApplicationCall and T \u003c GroupIndex.",
- "ImmediateNote": "{uint8 transaction group index} {uint8 position in scratch space to load from}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction group index",
+ "Encoding": "uint8",
+ "Name": "T"
+ },
+ {
+ "Comment": "position in scratch space to load from",
+ "Encoding": "uint8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 4,
"Groups": [
"Loading Values"
@@ -989,12 +1700,22 @@
{
"Opcode": 59,
"Name": "gloads",
- "Args": "U",
- "Returns": ".",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"Doc": "Ith scratch space value of the Ath transaction in the current group",
"DocExtra": "`gloads` fails unless the requested transaction is an ApplicationCall and A \u003c GroupIndex.",
- "ImmediateNote": "{uint8 position in scratch space to load from}",
+ "ImmediateNote": [
+ {
+ "Comment": "position in scratch space to load from",
+ "Encoding": "uint8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 4,
"Groups": [
"Loading Values"
@@ -1003,11 +1724,19 @@
{
"Opcode": 60,
"Name": "gaid",
- "Returns": "U",
+ "Returns": [
+ "uint64"
+ ],
"Size": 2,
"Doc": "ID of the asset or application created in the Tth transaction of the current group",
"DocExtra": "`gaid` fails unless the requested transaction created an asset or application and T \u003c GroupIndex.",
- "ImmediateNote": "{uint8 transaction group index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction group index",
+ "Encoding": "uint8",
+ "Name": "T"
+ }
+ ],
"IntroducedVersion": 4,
"Groups": [
"Loading Values"
@@ -1016,8 +1745,12 @@
{
"Opcode": 61,
"Name": "gaids",
- "Args": "U",
- "Returns": "U",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "ID of the asset or application created in the Ath transaction of the current group",
"DocExtra": "`gaids` fails unless the requested transaction created an asset or application and A \u003c GroupIndex.",
@@ -1029,8 +1762,12 @@
{
"Opcode": 62,
"Name": "loads",
- "Args": "U",
- "Returns": ".",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 1,
"Doc": "Ath scratch space value. All scratch spaces are 0 at program start.",
"IntroducedVersion": 5,
@@ -1041,7 +1778,10 @@
{
"Opcode": 63,
"Name": "stores",
- "Args": "U.",
+ "Args": [
+ "uint64",
+ "any"
+ ],
"Size": 1,
"Doc": "store B to the Ath scratch space",
"IntroducedVersion": 5,
@@ -1052,11 +1792,19 @@
{
"Opcode": 64,
"Name": "bnz",
- "Args": "U",
+ "Args": [
+ "uint64"
+ ],
"Size": 3,
"Doc": "branch to TARGET if value A is not zero",
"DocExtra": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Starting at v4, the offset is treated as a signed 16 bit integer allowing for backward branches and looping. In prior version (v1 to v3), branch offsets are limited to forward branches only, 0-0x7fff.\n\nAt v2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before v2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)",
- "ImmediateNote": "{int16 branch offset, big-endian}",
+ "ImmediateNote": [
+ {
+ "Comment": "branch offset",
+ "Encoding": "int16 (big-endian)",
+ "Name": "TARGET"
+ }
+ ],
"IntroducedVersion": 1,
"Groups": [
"Flow Control"
@@ -1065,11 +1813,19 @@
{
"Opcode": 65,
"Name": "bz",
- "Args": "U",
+ "Args": [
+ "uint64"
+ ],
"Size": 3,
"Doc": "branch to TARGET if value A is zero",
"DocExtra": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.",
- "ImmediateNote": "{int16 branch offset, big-endian}",
+ "ImmediateNote": [
+ {
+ "Comment": "branch offset",
+ "Encoding": "int16 (big-endian)",
+ "Name": "TARGET"
+ }
+ ],
"IntroducedVersion": 2,
"Groups": [
"Flow Control"
@@ -1081,7 +1837,13 @@
"Size": 3,
"Doc": "branch unconditionally to TARGET",
"DocExtra": "See `bnz` for details on how branches work. `b` always jumps to the offset.",
- "ImmediateNote": "{int16 branch offset, big-endian}",
+ "ImmediateNote": [
+ {
+ "Comment": "branch offset",
+ "Encoding": "int16 (big-endian)",
+ "Name": "TARGET"
+ }
+ ],
"IntroducedVersion": 2,
"Groups": [
"Flow Control"
@@ -1090,7 +1852,9 @@
{
"Opcode": 67,
"Name": "return",
- "Args": "U",
+ "Args": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "use A as success value; end",
"IntroducedVersion": 2,
@@ -1101,7 +1865,9 @@
{
"Opcode": 68,
"Name": "assert",
- "Args": "U",
+ "Args": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "immediately fail unless A is a non-zero number",
"IntroducedVersion": 3,
@@ -1112,10 +1878,18 @@
{
"Opcode": 69,
"Name": "bury",
- "Args": ".",
+ "Args": [
+ "any"
+ ],
"Size": 2,
"Doc": "replace the Nth value from the top of the stack with A. bury 0 fails.",
- "ImmediateNote": "{uint8 depth}",
+ "ImmediateNote": [
+ {
+ "Comment": "depth",
+ "Encoding": "uint8",
+ "Name": "N"
+ }
+ ],
"IntroducedVersion": 8,
"Groups": [
"Flow Control"
@@ -1126,7 +1900,13 @@
"Name": "popn",
"Size": 2,
"Doc": "remove N values from the top of the stack",
- "ImmediateNote": "{uint8 stack depth}",
+ "ImmediateNote": [
+ {
+ "Comment": "stack depth",
+ "Encoding": "uint8",
+ "Name": "N"
+ }
+ ],
"IntroducedVersion": 8,
"Groups": [
"Flow Control"
@@ -1135,10 +1915,18 @@
{
"Opcode": 71,
"Name": "dupn",
- "Args": ".",
+ "Args": [
+ "any"
+ ],
"Size": 2,
"Doc": "duplicate A, N times",
- "ImmediateNote": "{uint8 copy count}",
+ "ImmediateNote": [
+ {
+ "Comment": "copy count",
+ "Encoding": "uint8",
+ "Name": "N"
+ }
+ ],
"IntroducedVersion": 8,
"Groups": [
"Flow Control"
@@ -1147,7 +1935,9 @@
{
"Opcode": 72,
"Name": "pop",
- "Args": ".",
+ "Args": [
+ "any"
+ ],
"Size": 1,
"Doc": "discard A",
"IntroducedVersion": 1,
@@ -1158,8 +1948,13 @@
{
"Opcode": 73,
"Name": "dup",
- "Args": ".",
- "Returns": "..",
+ "Args": [
+ "any"
+ ],
+ "Returns": [
+ "any",
+ "any"
+ ],
"Size": 1,
"Doc": "duplicate A",
"IntroducedVersion": 1,
@@ -1170,8 +1965,16 @@
{
"Opcode": 74,
"Name": "dup2",
- "Args": "..",
- "Returns": "....",
+ "Args": [
+ "any",
+ "any"
+ ],
+ "Returns": [
+ "any",
+ "any",
+ "any",
+ "any"
+ ],
"Size": 1,
"Doc": "duplicate A and B",
"IntroducedVersion": 2,
@@ -1182,11 +1985,22 @@
{
"Opcode": 75,
"Name": "dig",
- "Args": ".",
- "Returns": "..",
+ "Args": [
+ "any"
+ ],
+ "Returns": [
+ "any",
+ "any"
+ ],
"Size": 2,
"Doc": "Nth value from the top of the stack. dig 0 is equivalent to dup",
- "ImmediateNote": "{uint8 depth}",
+ "ImmediateNote": [
+ {
+ "Comment": "depth",
+ "Encoding": "uint8",
+ "Name": "N"
+ }
+ ],
"IntroducedVersion": 3,
"Groups": [
"Flow Control"
@@ -1195,8 +2009,14 @@
{
"Opcode": 76,
"Name": "swap",
- "Args": "..",
- "Returns": "..",
+ "Args": [
+ "any",
+ "any"
+ ],
+ "Returns": [
+ "any",
+ "any"
+ ],
"Size": 1,
"Doc": "swaps A and B on stack",
"IntroducedVersion": 3,
@@ -1207,8 +2027,14 @@
{
"Opcode": 77,
"Name": "select",
- "Args": "..U",
- "Returns": ".",
+ "Args": [
+ "any",
+ "any",
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 1,
"Doc": "selects one of two values based on top-of-stack: B if C != 0, else A",
"IntroducedVersion": 3,
@@ -1219,11 +2045,21 @@
{
"Opcode": 78,
"Name": "cover",
- "Args": ".",
- "Returns": ".",
+ "Args": [
+ "any"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"Doc": "remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth \u003c= N.",
- "ImmediateNote": "{uint8 depth}",
+ "ImmediateNote": [
+ {
+ "Comment": "depth",
+ "Encoding": "uint8",
+ "Name": "N"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Flow Control"
@@ -1232,11 +2068,21 @@
{
"Opcode": 79,
"Name": "uncover",
- "Args": ".",
- "Returns": ".",
+ "Args": [
+ "any"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"Doc": "remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. Fails if stack depth \u003c= N.",
- "ImmediateNote": "{uint8 depth}",
+ "ImmediateNote": [
+ {
+ "Comment": "depth",
+ "Encoding": "uint8",
+ "Name": "N"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Flow Control"
@@ -1245,8 +2091,13 @@
{
"Opcode": 80,
"Name": "concat",
- "Args": "BB",
- "Returns": "B",
+ "Args": [
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "join A and B",
"DocExtra": "`concat` fails if the result would be greater than 4096 bytes.",
@@ -1258,11 +2109,26 @@
{
"Opcode": 81,
"Name": "substring",
- "Args": "B",
- "Returns": "B",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 3,
"Doc": "A range of bytes from A starting at S up to but not including E. If E \u003c S, or either is larger than the array length, the program fails",
- "ImmediateNote": "{uint8 start position} {uint8 end position}",
+ "ImmediateNote": [
+ {
+ "Comment": "start position",
+ "Encoding": "uint8",
+ "Name": "S"
+ },
+ {
+ "Comment": "end position",
+ "Encoding": "uint8",
+ "Name": "E"
+ }
+ ],
"IntroducedVersion": 2,
"Groups": [
"Byte Array Manipulation"
@@ -1271,8 +2137,14 @@
{
"Opcode": 82,
"Name": "substring3",
- "Args": "BUU",
- "Returns": "B",
+ "Args": [
+ "[]byte",
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "A range of bytes from A starting at B up to but not including C. If C \u003c B, or either is larger than the array length, the program fails",
"IntroducedVersion": 2,
@@ -1283,8 +2155,13 @@
{
"Opcode": 83,
"Name": "getbit",
- "Args": ".U",
- "Returns": "U",
+ "Args": [
+ "any",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
"DocExtra": "see explanation of bit ordering in setbit",
@@ -1296,8 +2173,14 @@
{
"Opcode": 84,
"Name": "setbit",
- "Args": ".UU",
- "Returns": ".",
+ "Args": [
+ "any",
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 1,
"Doc": "Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails",
"DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.",
@@ -1309,8 +2192,13 @@
{
"Opcode": 85,
"Name": "getbyte",
- "Args": "BU",
- "Returns": "U",
+ "Args": [
+ "[]byte",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
@@ -1321,8 +2209,14 @@
{
"Opcode": 86,
"Name": "setbyte",
- "Args": "BUU",
- "Returns": "B",
+ "Args": [
+ "[]byte",
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails",
"IntroducedVersion": 3,
@@ -1333,11 +2227,26 @@
{
"Opcode": 87,
"Name": "extract",
- "Args": "B",
- "Returns": "B",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 3,
"Doc": "A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails",
- "ImmediateNote": "{uint8 start position} {uint8 length}",
+ "ImmediateNote": [
+ {
+ "Comment": "start position",
+ "Encoding": "uint8",
+ "Name": "S"
+ },
+ {
+ "Comment": "length",
+ "Encoding": "uint8",
+ "Name": "L"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Byte Array Manipulation"
@@ -1346,8 +2255,14 @@
{
"Opcode": 88,
"Name": "extract3",
- "Args": "BUU",
- "Returns": "B",
+ "Args": [
+ "[]byte",
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails\n`extract3` can be called using `extract` with no immediates.",
"IntroducedVersion": 5,
@@ -1358,8 +2273,13 @@
{
"Opcode": 89,
"Name": "extract_uint16",
- "Args": "BU",
- "Returns": "U",
+ "Args": [
+ "[]byte",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails",
"IntroducedVersion": 5,
@@ -1370,8 +2290,13 @@
{
"Opcode": 90,
"Name": "extract_uint32",
- "Args": "BU",
- "Returns": "U",
+ "Args": [
+ "[]byte",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails",
"IntroducedVersion": 5,
@@ -1382,8 +2307,13 @@
{
"Opcode": 91,
"Name": "extract_uint64",
- "Args": "BU",
- "Returns": "U",
+ "Args": [
+ "[]byte",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails",
"IntroducedVersion": 5,
@@ -1394,11 +2324,22 @@
{
"Opcode": 92,
"Name": "replace2",
- "Args": "BB",
- "Returns": "B",
+ "Args": [
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 2,
"Doc": "Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)\n`replace2` can be called using `replace` with 1 immediate.",
- "ImmediateNote": "{uint8 start position}",
+ "ImmediateNote": [
+ {
+ "Comment": "start position",
+ "Encoding": "uint8",
+ "Name": "S"
+ }
+ ],
"IntroducedVersion": 7,
"Groups": [
"Byte Array Manipulation"
@@ -1407,8 +2348,14 @@
{
"Opcode": 93,
"Name": "replace3",
- "Args": "BUB",
- "Returns": "B",
+ "Args": [
+ "[]byte",
+ "uint64",
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)\n`replace3` can be called using `replace` with no immediates.",
"IntroducedVersion": 7,
@@ -1419,17 +2366,31 @@
{
"Opcode": 94,
"Name": "base64_decode",
- "Args": "B",
- "Returns": "B",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 2,
"ArgEnum": [
"URLEncoding",
"StdEncoding"
],
- "ArgEnumTypes": "..",
+ "ArgEnumTypes": [
+ "any",
+ "any"
+ ],
"Doc": "decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E",
"DocExtra": "*Warning*: Usage should be restricted to very rare use cases. In almost all cases, smart contracts should directly handle non-encoded byte-strings.\tThis opcode should only be used in cases where base64 is the only available option, e.g. interoperability with a third-party that only signs base64 strings.\n\n Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.",
- "ImmediateNote": "{uint8 encoding index}",
+ "ImmediateNote": [
+ {
+ "Comment": "encoding index",
+ "Encoding": "uint8",
+ "Name": "E",
+ "Reference": "base64"
+ }
+ ],
"IntroducedVersion": 7,
"Groups": [
"Byte Array Manipulation"
@@ -1438,18 +2399,34 @@
{
"Opcode": 95,
"Name": "json_ref",
- "Args": "BB",
- "Returns": ".",
+ "Args": [
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"ArgEnum": [
"JSONString",
"JSONUint64",
"JSONObject"
],
- "ArgEnumTypes": "BUB",
+ "ArgEnumTypes": [
+ "[]byte",
+ "uint64",
+ "[]byte"
+ ],
"Doc": "key B's value, of type R, from a [valid](jsonspec.md) utf-8 encoded json object A",
"DocExtra": "*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.\n\nAlmost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004). This opcode should only be used in cases where JSON is only available option, e.g. when a third-party only signs JSON.",
- "ImmediateNote": "{uint8 return type index}",
+ "ImmediateNote": [
+ {
+ "Comment": "return type index",
+ "Encoding": "uint8",
+ "Name": "R",
+ "Reference": "json_ref"
+ }
+ ],
"IntroducedVersion": 7,
"Groups": [
"Byte Array Manipulation"
@@ -1458,11 +2435,15 @@
{
"Opcode": 96,
"Name": "balance",
- "Args": ".",
- "Returns": "U",
+ "Args": [
+ "any"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. Changes caused by inner transactions are observable immediately following `itxn_submit`",
- "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address). Return: value.",
"IntroducedVersion": 2,
"Groups": [
"State Access"
@@ -1471,8 +2452,13 @@
{
"Opcode": 97,
"Name": "app_opted_in",
- "Args": ".U",
- "Returns": "U",
+ "Args": [
+ "any",
+ "uint64"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "1 if account A is opted in to application B, else 0",
"DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: 1 if opted in and 0 otherwise.",
@@ -1484,8 +2470,13 @@
{
"Opcode": 98,
"Name": "app_local_get",
- "Args": ".B",
- "Returns": ".",
+ "Args": [
+ "any",
+ "[]byte"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 1,
"Doc": "local state of the key B in the current application in account A",
"DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
@@ -1497,8 +2488,15 @@
{
"Opcode": 99,
"Name": "app_local_get_ex",
- "Args": ".UB",
- "Returns": ".U",
+ "Args": [
+ "any",
+ "uint64",
+ "[]byte"
+ ],
+ "Returns": [
+ "any",
+ "bool"
+ ],
"Size": 1,
"Doc": "X is the local state of application B, key C in account A. Y is 1 if key existed, else 0",
"DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
@@ -1510,8 +2508,12 @@
{
"Opcode": 100,
"Name": "app_global_get",
- "Args": "B",
- "Returns": ".",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 1,
"Doc": "global state of the key A in the current application",
"DocExtra": "params: state key. Return: value. The value is zero (of type uint64) if the key does not exist.",
@@ -1523,8 +2525,14 @@
{
"Opcode": 101,
"Name": "app_global_get_ex",
- "Args": "UB",
- "Returns": ".U",
+ "Args": [
+ "uint64",
+ "[]byte"
+ ],
+ "Returns": [
+ "any",
+ "bool"
+ ],
"Size": 1,
"Doc": "X is the global state of application A, key B. Y is 1 if key existed, else 0",
"DocExtra": "params: Txn.ForeignApps offset (or, since v4, an _available_ application id), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.",
@@ -1536,7 +2544,11 @@
{
"Opcode": 102,
"Name": "app_local_put",
- "Args": ".B.",
+ "Args": [
+ "any",
+ "[]byte",
+ "any"
+ ],
"Size": 1,
"Doc": "write C to key B in account A's local state of the current application",
"DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key, value.",
@@ -1548,7 +2560,10 @@
{
"Opcode": 103,
"Name": "app_global_put",
- "Args": "B.",
+ "Args": [
+ "[]byte",
+ "any"
+ ],
"Size": 1,
"Doc": "write B to key A in the global state of the current application",
"IntroducedVersion": 2,
@@ -1559,7 +2574,10 @@
{
"Opcode": 104,
"Name": "app_local_del",
- "Args": ".B",
+ "Args": [
+ "any",
+ "[]byte"
+ ],
"Size": 1,
"Doc": "delete key B from account A's local state of the current application",
"DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)",
@@ -1571,7 +2589,9 @@
{
"Opcode": 105,
"Name": "app_global_del",
- "Args": "B",
+ "Args": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "delete key A from the global state of the current application",
"DocExtra": "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)",
@@ -1583,17 +2603,33 @@
{
"Opcode": 112,
"Name": "asset_holding_get",
- "Args": ".U",
- "Returns": ".U",
+ "Args": [
+ "any",
+ "uint64"
+ ],
+ "Returns": [
+ "any",
+ "bool"
+ ],
"Size": 2,
"ArgEnum": [
"AssetBalance",
"AssetFrozen"
],
- "ArgEnumTypes": "UU",
+ "ArgEnumTypes": [
+ "uint64",
+ "bool"
+ ],
"Doc": "X is field F from account A's holding of asset B. Y is 1 if A is opted into B, else 0",
"DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or, since v4, a Txn.ForeignAssets offset). Return: did_exist flag (1 if the asset existed and 0 otherwise), value.",
- "ImmediateNote": "{uint8 asset holding field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "asset holding field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "asset_holding"
+ }
+ ],
"IntroducedVersion": 2,
"Groups": [
"State Access"
@@ -1602,8 +2638,13 @@
{
"Opcode": 113,
"Name": "asset_params_get",
- "Args": "U",
- "Returns": ".U",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "any",
+ "bool"
+ ],
"Size": 2,
"ArgEnum": [
"AssetTotal",
@@ -1619,10 +2660,30 @@
"AssetClawback",
"AssetCreator"
],
- "ArgEnumTypes": "UUUBBBBBBBBB",
+ "ArgEnumTypes": [
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "address",
+ "address",
+ "address",
+ "address"
+ ],
"Doc": "X is field F from asset A. Y is 1 if A exists, else 0",
"DocExtra": "params: Txn.ForeignAssets offset (or, since v4, an _available_ asset id. Return: did_exist flag (1 if the asset existed and 0 otherwise), value.",
- "ImmediateNote": "{uint8 asset params field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "asset params field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "asset_params"
+ }
+ ],
"IntroducedVersion": 2,
"Groups": [
"State Access"
@@ -1631,8 +2692,13 @@
{
"Opcode": 114,
"Name": "app_params_get",
- "Args": "U",
- "Returns": ".U",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "any",
+ "bool"
+ ],
"Size": 2,
"ArgEnum": [
"AppApprovalProgram",
@@ -1645,10 +2711,27 @@
"AppCreator",
"AppAddress"
],
- "ArgEnumTypes": "BBUUUUUBB",
+ "ArgEnumTypes": [
+ "[]byte",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "address",
+ "address"
+ ],
"Doc": "X is field F from app A. Y is 1 if A exists, else 0",
"DocExtra": "params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag (1 if the application existed and 0 otherwise), value.",
- "ImmediateNote": "{uint8 app params field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "app params field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "app_params"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"State Access"
@@ -1657,8 +2740,13 @@
{
"Opcode": 115,
"Name": "acct_params_get",
- "Args": ".",
- "Returns": ".U",
+ "Args": [
+ "any"
+ ],
+ "Returns": [
+ "any",
+ "bool"
+ ],
"Size": 2,
"ArgEnum": [
"AcctBalance",
@@ -1674,9 +2762,29 @@
"AcctTotalBoxes",
"AcctTotalBoxBytes"
],
- "ArgEnumTypes": "UUBUUUUUUUUU",
+ "ArgEnumTypes": [
+ "uint64",
+ "uint64",
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64"
+ ],
"Doc": "X is field F from account A. Y is 1 if A owns positive algos, else 0",
- "ImmediateNote": "{uint8 account params field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "account params field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "acct_params"
+ }
+ ],
"IntroducedVersion": 6,
"Groups": [
"State Access"
@@ -1685,11 +2793,15 @@
{
"Opcode": 120,
"Name": "min_balance",
- "Args": ".",
- "Returns": "U",
+ "Args": [
+ "any"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "minimum required balance for account A, in microalgos. Required balance is affected by ASA, App, and Box usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change.",
- "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.",
+ "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address). Return: value.",
"IntroducedVersion": 3,
"Groups": [
"State Access"
@@ -1698,11 +2810,19 @@
{
"Opcode": 128,
"Name": "pushbytes",
- "Returns": "B",
+ "Returns": [
+ "[]byte"
+ ],
"Size": 0,
"Doc": "immediate BYTES",
"DocExtra": "pushbytes args are not added to the bytecblock during assembly processes",
- "ImmediateNote": "{varuint length} {bytes}",
+ "ImmediateNote": [
+ {
+ "Comment": "a byte constant",
+ "Encoding": "varuint length, bytes",
+ "Name": "BYTES"
+ }
+ ],
"IntroducedVersion": 3,
"Groups": [
"Loading Values"
@@ -1711,11 +2831,19 @@
{
"Opcode": 129,
"Name": "pushint",
- "Returns": "U",
+ "Returns": [
+ "uint64"
+ ],
"Size": 0,
"Doc": "immediate UINT",
"DocExtra": "pushint args are not added to the intcblock during assembly processes",
- "ImmediateNote": "{varuint int}",
+ "ImmediateNote": [
+ {
+ "Comment": "an int constant",
+ "Encoding": "varuint",
+ "Name": "UINT"
+ }
+ ],
"IntroducedVersion": 3,
"Groups": [
"Loading Values"
@@ -1727,7 +2855,13 @@
"Size": 0,
"Doc": "push sequences of immediate byte arrays to stack (first byte array being deepest)",
"DocExtra": "pushbytess args are not added to the bytecblock during assembly processes",
- "ImmediateNote": "{varuint count} [({varuint length} bytes), ...]",
+ "ImmediateNote": [
+ {
+ "Comment": "a list of byte constants",
+ "Encoding": "varuint count, [varuint length, bytes ...]",
+ "Name": "BYTES ..."
+ }
+ ],
"IntroducedVersion": 8,
"Groups": [
"Loading Values"
@@ -1739,7 +2873,13 @@
"Size": 0,
"Doc": "push sequence of immediate uints to stack in the order they appear (first uint being deepest)",
"DocExtra": "pushints args are not added to the intcblock during assembly processes",
- "ImmediateNote": "{varuint count} [{varuint value}, ...]",
+ "ImmediateNote": [
+ {
+ "Comment": "a list of int constants",
+ "Encoding": "varuint count, [varuint ...]",
+ "Name": "UINT ..."
+ }
+ ],
"IntroducedVersion": 8,
"Groups": [
"Loading Values"
@@ -1748,8 +2888,14 @@
{
"Opcode": 132,
"Name": "ed25519verify_bare",
- "Args": "BBB",
- "Returns": "U",
+ "Args": [
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "for (data A, signature B, pubkey C) verify the signature of the data against the pubkey =\u003e {0 or 1}",
"IntroducedVersion": 7,
@@ -1763,7 +2909,13 @@
"Size": 3,
"Doc": "branch unconditionally to TARGET, saving the next instruction on the call stack",
"DocExtra": "The call stack is separate from the data stack. Only `callsub`, `retsub`, and `proto` manipulate it.",
- "ImmediateNote": "{int16 branch offset, big-endian}",
+ "ImmediateNote": [
+ {
+ "Comment": "branch offset",
+ "Encoding": "int16 (big-endian)",
+ "Name": "TARGET"
+ }
+ ],
"IntroducedVersion": 4,
"Groups": [
"Flow Control"
@@ -1786,7 +2938,18 @@
"Size": 3,
"Doc": "Prepare top call frame for a retsub that will assume A args and R return values.",
"DocExtra": "Fails unless the last instruction executed was a `callsub`.",
- "ImmediateNote": "{uint8 arguments} {uint8 return values}",
+ "ImmediateNote": [
+ {
+ "Comment": "number of arguments",
+ "Encoding": "uint8",
+ "Name": "A"
+ },
+ {
+ "Comment": "number of return values",
+ "Encoding": "uint8",
+ "Name": "R"
+ }
+ ],
"IntroducedVersion": 8,
"Groups": [
"Flow Control"
@@ -1795,10 +2958,18 @@
{
"Opcode": 139,
"Name": "frame_dig",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"Doc": "Nth (signed) value from the frame pointer.",
- "ImmediateNote": "{int8 frame slot}",
+ "ImmediateNote": [
+ {
+ "Comment": "frame slot",
+ "Encoding": "int8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 8,
"Groups": [
"Flow Control"
@@ -1807,10 +2978,18 @@
{
"Opcode": 140,
"Name": "frame_bury",
- "Args": ".",
+ "Args": [
+ "any"
+ ],
"Size": 2,
"Doc": "replace the Nth (signed) value from the frame pointer in the stack with A",
- "ImmediateNote": "{int8 frame slot}",
+ "ImmediateNote": [
+ {
+ "Comment": "frame slot",
+ "Encoding": "int8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 8,
"Groups": [
"Flow Control"
@@ -1819,10 +2998,18 @@
{
"Opcode": 141,
"Name": "switch",
- "Args": "U",
+ "Args": [
+ "uint64"
+ ],
"Size": 0,
"Doc": "branch to the Ath label. Continue at following instruction if index A exceeds the number of labels.",
- "ImmediateNote": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
+ "ImmediateNote": [
+ {
+ "Comment": "list of labels",
+ "Encoding": "varuint count, [int16 (big-endian) ...]",
+ "Name": "TARGET ..."
+ }
+ ],
"IntroducedVersion": 8,
"Groups": [
"Flow Control"
@@ -1834,7 +3021,13 @@
"Size": 0,
"Doc": "given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found.",
"DocExtra": "`match` consumes N+1 values from the stack. Let the top stack value be B. The following N values represent an ordered list of match cases/constants (A), where the first value (A[0]) is the deepest in the stack. The immediate arguments are an ordered list of N labels (T). `match` will branch to target T[I], where A[I] = B. If there are no matches then execution continues on to the next instruction.",
- "ImmediateNote": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
+ "ImmediateNote": [
+ {
+ "Comment": "list of labels",
+ "Encoding": "varuint count, [int16 (big-endian) ...]",
+ "Name": "TARGET ..."
+ }
+ ],
"IntroducedVersion": 8,
"Groups": [
"Flow Control"
@@ -1843,8 +3036,13 @@
{
"Opcode": 144,
"Name": "shl",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A times 2^B, modulo 2^64",
"IntroducedVersion": 4,
@@ -1855,8 +3053,13 @@
{
"Opcode": 145,
"Name": "shr",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A divided by 2^B",
"IntroducedVersion": 4,
@@ -1867,8 +3070,12 @@
{
"Opcode": 146,
"Name": "sqrt",
- "Args": "U",
- "Returns": "U",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "The largest integer I such that I^2 \u003c= A",
"IntroducedVersion": 4,
@@ -1879,8 +3086,12 @@
{
"Opcode": 147,
"Name": "bitlen",
- "Args": ".",
- "Returns": "U",
+ "Args": [
+ "any"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "The highest set bit in A. If A is a byte-array, it is interpreted as a big-endian unsigned integer. bitlen of 0 is 0, bitlen of 8 is 4",
"DocExtra": "bitlen interprets arrays as big-endian integers, unlike setbit/getbit",
@@ -1892,8 +3103,13 @@
{
"Opcode": 148,
"Name": "exp",
- "Args": "UU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A raised to the Bth power. Fail if A == B == 0 and on overflow",
"IntroducedVersion": 4,
@@ -1904,8 +3120,14 @@
{
"Opcode": 149,
"Name": "expw",
- "Args": "UU",
- "Returns": "UU",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64",
+ "uint64"
+ ],
"Size": 1,
"Doc": "A raised to the Bth power as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low. Fail if A == B == 0 or if the results exceeds 2^128-1",
"IntroducedVersion": 4,
@@ -1916,8 +3138,12 @@
{
"Opcode": 150,
"Name": "bsqrt",
- "Args": "B",
- "Returns": "B",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "The largest integer I such that I^2 \u003c= A. A and I are interpreted as big-endian unsigned integers",
"IntroducedVersion": 6,
@@ -1928,8 +3154,14 @@
{
"Opcode": 151,
"Name": "divw",
- "Args": "UUU",
- "Returns": "U",
+ "Args": [
+ "uint64",
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "uint64"
+ ],
"Size": 1,
"Doc": "A,B / C. Fail if C == 0 or if result overflows.",
"DocExtra": "The notation A,B indicates that A and B are interpreted as a uint128 value, with A as the high uint64 and B the low.",
@@ -1941,8 +3173,12 @@
{
"Opcode": 152,
"Name": "sha3_256",
- "Args": "B",
- "Returns": "B",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "SHA3_256 hash of value A, yields [32]byte",
"IntroducedVersion": 7,
@@ -1953,8 +3189,13 @@
{
"Opcode": 160,
"Name": "b+",
- "Args": "BB",
- "Returns": "B",
+ "Args": [
+ "bigint",
+ "bigint"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "A plus B. A and B are interpreted as big-endian unsigned integers",
"IntroducedVersion": 4,
@@ -1965,8 +3206,13 @@
{
"Opcode": 161,
"Name": "b-",
- "Args": "BB",
- "Returns": "B",
+ "Args": [
+ "bigint",
+ "bigint"
+ ],
+ "Returns": [
+ "bigint"
+ ],
"Size": 1,
"Doc": "A minus B. A and B are interpreted as big-endian unsigned integers. Fail on underflow.",
"IntroducedVersion": 4,
@@ -1977,8 +3223,13 @@
{
"Opcode": 162,
"Name": "b/",
- "Args": "BB",
- "Returns": "B",
+ "Args": [
+ "bigint",
+ "bigint"
+ ],
+ "Returns": [
+ "bigint"
+ ],
"Size": 1,
"Doc": "A divided by B (truncated division). A and B are interpreted as big-endian unsigned integers. Fail if B is zero.",
"IntroducedVersion": 4,
@@ -1989,8 +3240,13 @@
{
"Opcode": 163,
"Name": "b*",
- "Args": "BB",
- "Returns": "B",
+ "Args": [
+ "bigint",
+ "bigint"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "A times B. A and B are interpreted as big-endian unsigned integers.",
"IntroducedVersion": 4,
@@ -2001,8 +3257,13 @@
{
"Opcode": 164,
"Name": "b\u003c",
- "Args": "BB",
- "Returns": "U",
+ "Args": [
+ "bigint",
+ "bigint"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "1 if A is less than B, else 0. A and B are interpreted as big-endian unsigned integers",
"IntroducedVersion": 4,
@@ -2013,8 +3274,13 @@
{
"Opcode": 165,
"Name": "b\u003e",
- "Args": "BB",
- "Returns": "U",
+ "Args": [
+ "bigint",
+ "bigint"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "1 if A is greater than B, else 0. A and B are interpreted as big-endian unsigned integers",
"IntroducedVersion": 4,
@@ -2025,8 +3291,13 @@
{
"Opcode": 166,
"Name": "b\u003c=",
- "Args": "BB",
- "Returns": "U",
+ "Args": [
+ "bigint",
+ "bigint"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "1 if A is less than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers",
"IntroducedVersion": 4,
@@ -2037,8 +3308,13 @@
{
"Opcode": 167,
"Name": "b\u003e=",
- "Args": "BB",
- "Returns": "U",
+ "Args": [
+ "bigint",
+ "bigint"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "1 if A is greater than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers",
"IntroducedVersion": 4,
@@ -2049,8 +3325,13 @@
{
"Opcode": 168,
"Name": "b==",
- "Args": "BB",
- "Returns": "U",
+ "Args": [
+ "bigint",
+ "bigint"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "1 if A is equal to B, else 0. A and B are interpreted as big-endian unsigned integers",
"IntroducedVersion": 4,
@@ -2061,8 +3342,13 @@
{
"Opcode": 169,
"Name": "b!=",
- "Args": "BB",
- "Returns": "U",
+ "Args": [
+ "bigint",
+ "bigint"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "0 if A is equal to B, else 1. A and B are interpreted as big-endian unsigned integers",
"IntroducedVersion": 4,
@@ -2073,8 +3359,13 @@
{
"Opcode": 170,
"Name": "b%",
- "Args": "BB",
- "Returns": "B",
+ "Args": [
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "A modulo B. A and B are interpreted as big-endian unsigned integers. Fail if B is zero.",
"IntroducedVersion": 4,
@@ -2085,8 +3376,13 @@
{
"Opcode": 171,
"Name": "b|",
- "Args": "BB",
- "Returns": "B",
+ "Args": [
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "A bitwise-or B. A and B are zero-left extended to the greater of their lengths",
"IntroducedVersion": 4,
@@ -2097,8 +3393,13 @@
{
"Opcode": 172,
"Name": "b\u0026",
- "Args": "BB",
- "Returns": "B",
+ "Args": [
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "A bitwise-and B. A and B are zero-left extended to the greater of their lengths",
"IntroducedVersion": 4,
@@ -2109,8 +3410,13 @@
{
"Opcode": 173,
"Name": "b^",
- "Args": "BB",
- "Returns": "B",
+ "Args": [
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "A bitwise-xor B. A and B are zero-left extended to the greater of their lengths",
"IntroducedVersion": 4,
@@ -2121,8 +3427,12 @@
{
"Opcode": 174,
"Name": "b~",
- "Args": "B",
- "Returns": "B",
+ "Args": [
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "A with all bits inverted",
"IntroducedVersion": 4,
@@ -2133,8 +3443,12 @@
{
"Opcode": 175,
"Name": "bzero",
- "Args": "U",
- "Returns": "B",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "zero filled byte-array of length A",
"IntroducedVersion": 4,
@@ -2145,7 +3459,9 @@
{
"Opcode": 176,
"Name": "log",
- "Args": "B",
+ "Args": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "write A to log state of the current application",
"DocExtra": "`log` fails if called more than MaxLogCalls times in a program, or if the sum of logged bytes exceeds 1024 bytes.",
@@ -2168,7 +3484,9 @@
{
"Opcode": 178,
"Name": "itxn_field",
- "Args": ".",
+ "Args": [
+ "any"
+ ],
"Size": 2,
"ArgEnum": [
"Sender",
@@ -2223,10 +3541,69 @@
"ApprovalProgramPages",
"ClearStateProgramPages"
],
- "ArgEnumTypes": "BUBBUBBBUUUBUUUBBBUUBBBBBUUUUBBBBBBBBUBUUUUUUUUUBBB",
+ "ArgEnumTypes": [
+ "address",
+ "uint64",
+ "[]byte",
+ "address",
+ "uint64",
+ "address",
+ "[32]byte",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "address",
+ "[]byte",
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "address",
+ "bool",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
"Doc": "set field F of the current inner transaction to A",
"DocExtra": "`itxn_field` fails if A is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. `itxn_field` also fails if A is an account, asset, or app that is not _available_, or an attempt is made extend an array field beyond the limit imposed by consensus parameters. (Addresses set into asset params of acfg transactions need not be _available_.)",
- "ImmediateNote": "{uint8 transaction field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txn"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Inner Transactions"
@@ -2246,7 +3623,9 @@
{
"Opcode": 180,
"Name": "itxn",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"ArgEnum": [
"Sender",
@@ -2318,9 +3697,85 @@
"ClearStateProgramPages",
"NumClearStateProgramPages"
],
- "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBBBUBU",
+ "ArgEnumTypes": [
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "uint64",
+ "address",
+ "[32]byte",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "uint64",
+ "address",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "address",
+ "bool",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "uint64",
+ "[]byte",
+ "uint64"
+ ],
"Doc": "field F of the last inner transaction",
- "ImmediateNote": "{uint8 transaction field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txn"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Inner Transactions"
@@ -2329,7 +3784,9 @@
{
"Opcode": 181,
"Name": "itxna",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 3,
"ArgEnum": [
"ApplicationArgs",
@@ -2340,9 +3797,29 @@
"ApprovalProgramPages",
"ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUBBB",
+ "ArgEnumTypes": [
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
"Doc": "Ith value of the array field F of the last inner transaction",
- "ImmediateNote": "{uint8 transaction field index} {uint8 transaction field array index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txna"
+ },
+ {
+ "Comment": "a transaction field array index",
+ "Encoding": "uint8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Inner Transactions"
@@ -2362,7 +3839,9 @@
{
"Opcode": 183,
"Name": "gitxn",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 3,
"ArgEnum": [
"Sender",
@@ -2434,9 +3913,90 @@
"ClearStateProgramPages",
"NumClearStateProgramPages"
],
- "ArgEnumTypes": "BUUUUBBBUBBBUUUBUUUBBBUBUUBUBUBBBUUUUBBBBBBBBUBUUUUUUUUUUUBUUUBBBUBU",
+ "ArgEnumTypes": [
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "uint64",
+ "address",
+ "[32]byte",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "[32]byte",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "uint64",
+ "address",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "[32]byte",
+ "address",
+ "address",
+ "address",
+ "address",
+ "uint64",
+ "address",
+ "bool",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "uint64",
+ "bool",
+ "[]byte",
+ "uint64",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte",
+ "uint64",
+ "[]byte",
+ "uint64"
+ ],
"Doc": "field F of the Tth transaction in the last inner group submitted",
- "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction group index",
+ "Encoding": "uint8",
+ "Name": "T"
+ },
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txn"
+ }
+ ],
"IntroducedVersion": 6,
"Groups": [
"Inner Transactions"
@@ -2445,7 +4005,9 @@
{
"Opcode": 184,
"Name": "gitxna",
- "Returns": ".",
+ "Returns": [
+ "any"
+ ],
"Size": 4,
"ArgEnum": [
"ApplicationArgs",
@@ -2456,9 +4018,34 @@
"ApprovalProgramPages",
"ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUBBB",
+ "ArgEnumTypes": [
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
"Doc": "Ith value of the array field F from the Tth transaction in the last inner group submitted",
- "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction group index",
+ "Encoding": "uint8",
+ "Name": "T"
+ },
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txna"
+ },
+ {
+ "Comment": "transaction field array index",
+ "Encoding": "uint8",
+ "Name": "I"
+ }
+ ],
"IntroducedVersion": 6,
"Groups": [
"Inner Transactions"
@@ -2467,8 +4054,13 @@
{
"Opcode": 185,
"Name": "box_create",
- "Args": "BU",
- "Returns": "U",
+ "Args": [
+ "boxName",
+ "uint64"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "create a box named A, of length B. Fail if A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1",
"DocExtra": "Newly created boxes are filled with 0 bytes. `box_create` will fail if the referenced box already exists with a different size. Otherwise, existing boxes are unchanged by `box_create`.",
@@ -2480,8 +4072,14 @@
{
"Opcode": 186,
"Name": "box_extract",
- "Args": "BUU",
- "Returns": "B",
+ "Args": [
+ "boxName",
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.",
"IntroducedVersion": 8,
@@ -2492,7 +4090,11 @@
{
"Opcode": 187,
"Name": "box_replace",
- "Args": "BUB",
+ "Args": [
+ "boxName",
+ "uint64",
+ "[]byte"
+ ],
"Size": 1,
"Doc": "write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.",
"IntroducedVersion": 8,
@@ -2503,8 +4105,12 @@
{
"Opcode": 188,
"Name": "box_del",
- "Args": "B",
- "Returns": "U",
+ "Args": [
+ "boxName"
+ ],
+ "Returns": [
+ "bool"
+ ],
"Size": 1,
"Doc": "delete box named A if it exists. Return 1 if A existed, 0 otherwise",
"IntroducedVersion": 8,
@@ -2515,8 +4121,13 @@
{
"Opcode": 189,
"Name": "box_len",
- "Args": "B",
- "Returns": "UU",
+ "Args": [
+ "boxName"
+ ],
+ "Returns": [
+ "uint64",
+ "bool"
+ ],
"Size": 1,
"Doc": "X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0.",
"IntroducedVersion": 8,
@@ -2527,8 +4138,13 @@
{
"Opcode": 190,
"Name": "box_get",
- "Args": "B",
- "Returns": "BU",
+ "Args": [
+ "boxName"
+ ],
+ "Returns": [
+ "[]byte",
+ "bool"
+ ],
"Size": 1,
"Doc": "X is the contents of box A if A exists, else ''. Y is 1 if A exists, else 0.",
"DocExtra": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`",
@@ -2540,7 +4156,10 @@
{
"Opcode": 191,
"Name": "box_put",
- "Args": "BB",
+ "Args": [
+ "boxName",
+ "[]byte"
+ ],
"Size": 1,
"Doc": "replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). Creates A if it does not exist",
"DocExtra": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`",
@@ -2552,8 +4171,12 @@
{
"Opcode": 192,
"Name": "txnas",
- "Args": "U",
- "Returns": ".",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"ArgEnum": [
"ApplicationArgs",
@@ -2564,9 +4187,24 @@
"ApprovalProgramPages",
"ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUBBB",
+ "ArgEnumTypes": [
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
"Doc": "Ath value of the array field F of the current transaction",
- "ImmediateNote": "{uint8 transaction field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txna"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Loading Values"
@@ -2575,8 +4213,12 @@
{
"Opcode": 193,
"Name": "gtxnas",
- "Args": "U",
- "Returns": ".",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 3,
"ArgEnum": [
"ApplicationArgs",
@@ -2587,9 +4229,29 @@
"ApprovalProgramPages",
"ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUBBB",
+ "ArgEnumTypes": [
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
"Doc": "Ath value of the array field F from the Tth transaction in the current group",
- "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction group index",
+ "Encoding": "uint8",
+ "Name": "T"
+ },
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txna"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Loading Values"
@@ -2598,8 +4260,13 @@
{
"Opcode": 194,
"Name": "gtxnsas",
- "Args": "UU",
- "Returns": ".",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"ArgEnum": [
"ApplicationArgs",
@@ -2610,9 +4277,24 @@
"ApprovalProgramPages",
"ClearStateProgramPages"
],
- "ArgEnumTypes": "BBUUBBB",
+ "ArgEnumTypes": [
+ "[]byte",
+ "address",
+ "uint64",
+ "uint64",
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
"Doc": "Bth value of the array field F from the Ath transaction in the current group",
- "ImmediateNote": "{uint8 transaction field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txna"
+ }
+ ],
"IntroducedVersion": 5,
"Groups": [
"Loading Values"
@@ -2621,8 +4303,12 @@
{
"Opcode": 195,
"Name": "args",
- "Args": "U",
- "Returns": "B",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "[]byte"
+ ],
"Size": 1,
"Doc": "Ath LogicSig argument",
"IntroducedVersion": 5,
@@ -2633,8 +4319,13 @@
{
"Opcode": 196,
"Name": "gloadss",
- "Args": "UU",
- "Returns": ".",
+ "Args": [
+ "uint64",
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 1,
"Doc": "Bth scratch space value of the Ath transaction in the current group",
"IntroducedVersion": 6,
@@ -2645,11 +4336,22 @@
{
"Opcode": 197,
"Name": "itxnas",
- "Args": "U",
- "Returns": ".",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"Doc": "Ath value of the array field F of the last inner transaction",
- "ImmediateNote": "{uint8 transaction field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txna"
+ }
+ ],
"IntroducedVersion": 6,
"Groups": [
"Inner Transactions"
@@ -2658,11 +4360,27 @@
{
"Opcode": 198,
"Name": "gitxnas",
- "Args": "U",
- "Returns": ".",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 3,
"Doc": "Ath value of the array field F from the Tth transaction in the last inner group submitted",
- "ImmediateNote": "{uint8 transaction group index} {uint8 transaction field index}",
+ "ImmediateNote": [
+ {
+ "Comment": "transaction group index",
+ "Encoding": "uint8",
+ "Name": "T"
+ },
+ {
+ "Comment": "transaction field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "txna"
+ }
+ ],
"IntroducedVersion": 6,
"Groups": [
"Inner Transactions"
@@ -2671,15 +4389,29 @@
{
"Opcode": 208,
"Name": "vrf_verify",
- "Args": "BBB",
- "Returns": "BU",
+ "Args": [
+ "[]byte",
+ "[]byte",
+ "[]byte"
+ ],
+ "Returns": [
+ "[]byte",
+ "bool"
+ ],
"Size": 2,
"ArgEnum": [
"VrfAlgorand"
],
"Doc": "Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.",
"DocExtra": "`VrfAlgorand` is the VRF used in Algorand. It is ECVRF-ED25519-SHA512-Elligator2, specified in the IETF internet draft [draft-irtf-cfrg-vrf-03](https://datatracker.ietf.org/doc/draft-irtf-cfrg-vrf/03/).",
- "ImmediateNote": "{uint8 parameters index}",
+ "ImmediateNote": [
+ {
+ "Comment": " parameters index",
+ "Encoding": "uint8",
+ "Name": "S",
+ "Reference": "vrf_verify"
+ }
+ ],
"IntroducedVersion": 7,
"Groups": [
"Arithmetic"
@@ -2688,16 +4420,30 @@
{
"Opcode": 209,
"Name": "block",
- "Args": "U",
- "Returns": ".",
+ "Args": [
+ "uint64"
+ ],
+ "Returns": [
+ "any"
+ ],
"Size": 2,
"ArgEnum": [
"BlkSeed",
"BlkTimestamp"
],
- "ArgEnumTypes": "BU",
+ "ArgEnumTypes": [
+ "[]byte",
+ "uint64"
+ ],
"Doc": "field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive)",
- "ImmediateNote": "{uint8 block field index}",
+ "ImmediateNote": [
+ {
+ "Comment": " block field index",
+ "Encoding": "uint8",
+ "Name": "F",
+ "Reference": "block"
+ }
+ ],
"IntroducedVersion": 7,
"Groups": [
"State Access"
diff --git a/data/transactions/logic/ledger_test.go b/data/transactions/logic/ledger_test.go
index ecda75562..3016ae529 100644
--- a/data/transactions/logic/ledger_test.go
+++ b/data/transactions/logic/ledger_test.go
@@ -328,7 +328,7 @@ func (l *Ledger) Authorizer(addr basics.Address) (basics.Address, error) {
func (l *Ledger) GetGlobal(appIdx basics.AppIndex, key string) (basics.TealValue, bool, error) {
params, ok := l.applications[appIdx]
if !ok {
- return basics.TealValue{}, false, fmt.Errorf("no such app %d", appIdx)
+ return basics.TealValue{}, false, fmt.Errorf("no app %d", appIdx)
}
// return most recent value if available
@@ -351,7 +351,7 @@ func (l *Ledger) GetGlobal(appIdx basics.AppIndex, key string) (basics.TealValue
func (l *Ledger) SetGlobal(appIdx basics.AppIndex, key string, value basics.TealValue) error {
params, ok := l.applications[appIdx]
if !ok {
- return fmt.Errorf("no such app %d", appIdx)
+ return fmt.Errorf("no app %d", appIdx)
}
// if writing the same value, return
@@ -375,7 +375,7 @@ func (l *Ledger) SetGlobal(appIdx basics.AppIndex, key string, value basics.Teal
func (l *Ledger) DelGlobal(appIdx basics.AppIndex, key string) error {
params, ok := l.applications[appIdx]
if !ok {
- return fmt.Errorf("no such app %d", appIdx)
+ return fmt.Errorf("no app %d", appIdx)
}
exist := false
@@ -405,17 +405,17 @@ func (l *Ledger) NewBox(appIdx basics.AppIndex, key string, value []byte, appAdd
}
params, ok := l.applications[appIdx]
if !ok {
- return fmt.Errorf("no such app %d", appIdx)
+ return fmt.Errorf("no app %d", appIdx)
}
if params.boxMods == nil {
params.boxMods = make(map[string][]byte)
}
if current, ok := params.boxMods[key]; ok {
if current != nil {
- return fmt.Errorf("attempt to recreate %s", key)
+ return fmt.Errorf("attempt to recreate box %#v", key)
}
} else if _, ok := params.boxes[key]; ok {
- return fmt.Errorf("attempt to recreate %s", key)
+ return fmt.Errorf("attempt to recreate box %#x", key)
}
params.boxMods[key] = value
l.applications[appIdx] = params
@@ -449,7 +449,7 @@ func (l *Ledger) SetBox(appIdx basics.AppIndex, key string, value []byte) error
return err
}
if !ok {
- return fmt.Errorf("no such box %d", appIdx)
+ return fmt.Errorf("no box %d", appIdx)
}
params := l.applications[appIdx] // assured, based on above
if params.boxMods == nil {
@@ -487,7 +487,7 @@ func (l *Ledger) DelBox(appIdx basics.AppIndex, key string, appAddr basics.Addre
func (l *Ledger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) (basics.TealValue, bool, error) {
br, ok := l.balances[addr]
if !ok {
- return basics.TealValue{}, false, fmt.Errorf("no such address")
+ return basics.TealValue{}, false, fmt.Errorf("no account: %s", addr)
}
tkvd, ok := br.locals[appIdx]
if !ok {
@@ -513,7 +513,7 @@ func (l *Ledger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key strin
func (l *Ledger) SetLocal(addr basics.Address, appIdx basics.AppIndex, key string, value basics.TealValue, accountIdx uint64) error {
br, ok := l.balances[addr]
if !ok {
- return fmt.Errorf("no such address")
+ return fmt.Errorf("no account: %s", addr)
}
tkv, ok := br.locals[appIdx]
if !ok {
@@ -541,7 +541,7 @@ func (l *Ledger) SetLocal(addr basics.Address, appIdx basics.AppIndex, key strin
func (l *Ledger) DelLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) error {
br, ok := l.balances[addr]
if !ok {
- return fmt.Errorf("no such address")
+ return fmt.Errorf("no account: %s", addr)
}
tkv, ok := br.locals[appIdx]
if !ok {
@@ -573,7 +573,7 @@ func (l *Ledger) DelLocal(addr basics.Address, appIdx basics.AppIndex, key strin
func (l *Ledger) OptedIn(addr basics.Address, appIdx basics.AppIndex) (bool, error) {
br, ok := l.balances[addr]
if !ok {
- return false, fmt.Errorf("no such address")
+ return false, fmt.Errorf("no account: %s", addr)
}
_, ok = br.locals[appIdx]
return ok, nil
@@ -586,9 +586,9 @@ func (l *Ledger) AssetHolding(addr basics.Address, assetID basics.AssetIndex) (b
if asset, ok := br.holdings[assetID]; ok {
return asset, nil
}
- return basics.AssetHolding{}, fmt.Errorf("No asset for account")
+ return basics.AssetHolding{}, fmt.Errorf("no asset %d for account %s", assetID, addr)
}
- return basics.AssetHolding{}, fmt.Errorf("no such address")
+ return basics.AssetHolding{}, fmt.Errorf("no account: %s", addr)
}
// AssetParams gives the parameters of an ASA if it exists
@@ -596,7 +596,7 @@ func (l *Ledger) AssetParams(assetID basics.AssetIndex) (basics.AssetParams, bas
if asset, ok := l.assets[assetID]; ok {
return asset.AssetParams, asset.Creator, nil
}
- return basics.AssetParams{}, basics.Address{}, fmt.Errorf("no such asset")
+ return basics.AssetParams{}, basics.Address{}, fmt.Errorf("no asset %d", assetID)
}
// AppParams gives the parameters of an App if it exists
@@ -604,7 +604,7 @@ func (l *Ledger) AppParams(appID basics.AppIndex) (basics.AppParams, basics.Addr
if app, ok := l.applications[appID]; ok {
return app.AppParams, app.Creator, nil
}
- return basics.AppParams{}, basics.Address{}, fmt.Errorf("no such app %d", appID)
+ return basics.AppParams{}, basics.Address{}, fmt.Errorf("no app %d", appID)
}
func (l *Ledger) move(from basics.Address, to basics.Address, amount uint64) error {
@@ -905,7 +905,7 @@ func (l *Ledger) Perform(gi int, ep *EvalParams) error {
func (l *Ledger) Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) {
br, ok := l.balances[addr]
if !ok {
- return basics.AccountData{}, fmt.Errorf("addr %s not in test.Ledger", addr.String())
+ return basics.AccountData{}, fmt.Errorf("no account %s", addr)
}
return basics.AccountData{
MicroAlgos: basics.MicroAlgos{Raw: br.balance},
@@ -926,7 +926,7 @@ func (l *Ledger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableTy
params, found := l.applications[basics.AppIndex(cidx)]
return params.Creator, found, nil
}
- return basics.Address{}, false, fmt.Errorf("%v %d is not in test.Ledger", ctype, cidx)
+ return basics.Address{}, false, fmt.Errorf("no creatable %v %d", ctype, cidx)
}
// SetKey creates a new key-value in {addr, aidx, global} storage
diff --git a/data/transactions/logic/mocktracer/scenarios.go b/data/transactions/logic/mocktracer/scenarios.go
index f89a1d139..0b336db50 100644
--- a/data/transactions/logic/mocktracer/scenarios.go
+++ b/data/transactions/logic/mocktracer/scenarios.go
@@ -23,9 +23,11 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
)
@@ -78,12 +80,18 @@ func fillProgramTemplate(beforeInnersOps, innerApprovalProgram, betweenInnersOps
// TestScenarioInfo holds arguments used to call a TestScenarioGenerator
type TestScenarioInfo struct {
- CallingTxn transactions.Transaction
- MinFee basics.MicroAlgos
- CreatedAppID basics.AppIndex
+ CallingTxn transactions.Transaction
+ SenderData ledgercore.AccountData
+ AppAccountData ledgercore.AccountData
+ FeeSinkData ledgercore.AccountData
+ FeeSinkAddr basics.Address
+ MinFee basics.MicroAlgos
+ CreatedAppID basics.AppIndex
+ BlockHeader bookkeeping.BlockHeader
+ PrevTimestamp int64
}
-func expectedApplyData(info TestScenarioInfo) transactions.ApplyData {
+func expectedApplyDataAndStateDelta(info TestScenarioInfo, appCallProgram string, innerProgramBytes []byte) (transactions.ApplyData, ledgercore.StateDelta, ledgercore.StateDelta, ledgercore.StateDelta, ledgercore.StateDelta) {
expectedInnerAppCall := txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: info.CreatedAppID.Address(),
@@ -128,7 +136,8 @@ pushint 1`,
Fee: info.MinFee,
}
expectedInnerPay2AD := transactions.ApplyData{}
- return transactions.ApplyData{
+
+ expectedAD := transactions.ApplyData{
ApplicationID: info.CreatedAppID,
EvalDelta: transactions.EvalDelta{
GlobalDelta: basics.StateDelta{},
@@ -150,6 +159,153 @@ pushint 1`,
Logs: []string{"a", "b", "c"},
},
}
+
+ ops, err := logic.AssembleString(appCallProgram)
+ if err != nil {
+ panic(err)
+ }
+
+ expectedSenderData := info.SenderData
+ expectedSenderData.MicroAlgos.Raw -= info.CallingTxn.Fee.Raw
+ expectedSenderData.TotalAppParams++
+ expectedFeeSinkData := info.FeeSinkData
+ expectedFeeSinkData.MicroAlgos.Raw += info.CallingTxn.Fee.Raw
+
+ expectedDeltaCallingTxn := ledgercore.StateDelta{
+ Accts: ledgercore.AccountDeltas{
+ Accts: []ledgercore.BalanceRecord{
+ {
+ Addr: info.CallingTxn.Sender,
+ AccountData: expectedSenderData,
+ },
+ {
+ Addr: info.FeeSinkAddr,
+ AccountData: expectedFeeSinkData,
+ },
+ },
+ AppResources: []ledgercore.AppResourceRecord{
+ {
+ Aidx: info.CreatedAppID,
+ Addr: info.CallingTxn.Sender,
+ Params: ledgercore.AppParamsDelta{
+ Params: &basics.AppParams{
+ ApprovalProgram: ops.Program,
+ ClearStateProgram: info.CallingTxn.ClearStateProgram,
+ StateSchemas: basics.StateSchemas{
+ LocalStateSchema: info.CallingTxn.LocalStateSchema,
+ GlobalStateSchema: info.CallingTxn.GlobalStateSchema,
+ },
+ ExtraProgramPages: info.CallingTxn.ExtraProgramPages,
+ },
+ },
+ },
+ },
+ },
+ Creatables: map[basics.CreatableIndex]ledgercore.ModifiedCreatable{
+ basics.CreatableIndex(info.CreatedAppID): {
+ Ctype: basics.AppCreatable,
+ Created: true,
+ Creator: info.CallingTxn.Sender,
+ },
+ },
+ Txids: map[transactions.Txid]ledgercore.IncludedTransactions{
+ // Cannot call info.CallingTxn.ID() yet, since the txn and its group are not yet final. Instead,
+ // use the Txid zero value as a placeholder. It's up to the caller to update this if they need it.
+ {}: {
+ LastValid: info.CallingTxn.LastValid,
+ Intra: 0,
+ },
+ },
+ Hdr: &info.BlockHeader,
+ PrevTimestamp: info.PrevTimestamp,
+ }
+ expectedDeltaCallingTxn.Hydrate()
+
+ expectedAppAccountData := info.AppAccountData
+ expectedAppAccountData.TotalAppParams++
+ expectedAppAccountData.MicroAlgos.Raw -= info.MinFee.Raw
+ expectedFeeSinkData.MicroAlgos.Raw += info.MinFee.Raw
+
+ expectedDeltaInnerAppCall := ledgercore.StateDelta{
+ Accts: ledgercore.AccountDeltas{
+ Accts: []ledgercore.BalanceRecord{
+ {
+ Addr: info.CreatedAppID.Address(),
+ AccountData: expectedAppAccountData,
+ },
+ {
+ Addr: info.FeeSinkAddr,
+ AccountData: expectedFeeSinkData,
+ },
+ },
+ AppResources: []ledgercore.AppResourceRecord{
+ {
+ Aidx: info.CreatedAppID + 1,
+ Addr: info.CreatedAppID.Address(),
+ Params: ledgercore.AppParamsDelta{
+ Params: &basics.AppParams{
+ ApprovalProgram: innerProgramBytes,
+ ClearStateProgram: []byte{0x06, 0x81, 0x01}, // #pragma version 6; int 1;
+ },
+ },
+ },
+ },
+ },
+ Creatables: map[basics.CreatableIndex]ledgercore.ModifiedCreatable{
+ basics.CreatableIndex(info.CreatedAppID + 1): {
+ Ctype: basics.AppCreatable,
+ Created: true,
+ Creator: info.CreatedAppID.Address(),
+ },
+ },
+ Hdr: &info.BlockHeader,
+ PrevTimestamp: info.PrevTimestamp,
+ }
+ expectedDeltaInnerAppCall.Hydrate()
+
+ expectedAppAccountData.MicroAlgos.Raw -= info.MinFee.Raw
+ expectedFeeSinkData.MicroAlgos.Raw += info.MinFee.Raw
+
+ expectedDeltaInnerPay1 := ledgercore.StateDelta{
+ Accts: ledgercore.AccountDeltas{
+ Accts: []ledgercore.BalanceRecord{
+ {
+ Addr: info.CreatedAppID.Address(),
+ AccountData: expectedAppAccountData,
+ },
+ {
+ Addr: info.FeeSinkAddr,
+ AccountData: expectedFeeSinkData,
+ },
+ },
+ },
+ Hdr: &info.BlockHeader,
+ PrevTimestamp: info.PrevTimestamp,
+ }
+ expectedDeltaInnerPay1.Hydrate()
+
+ expectedAppAccountData.MicroAlgos.Raw -= info.MinFee.Raw
+ expectedFeeSinkData.MicroAlgos.Raw += info.MinFee.Raw
+
+ expectedDeltaInnerPay2 := ledgercore.StateDelta{
+ Accts: ledgercore.AccountDeltas{
+ Accts: []ledgercore.BalanceRecord{
+ {
+ Addr: info.CreatedAppID.Address(),
+ AccountData: expectedAppAccountData,
+ },
+ {
+ Addr: info.FeeSinkAddr,
+ AccountData: expectedFeeSinkData,
+ },
+ },
+ },
+ Hdr: &info.BlockHeader,
+ PrevTimestamp: info.PrevTimestamp,
+ }
+ expectedDeltaInnerPay2.Hydrate()
+
+ return expectedAD, expectedDeltaCallingTxn, expectedDeltaInnerAppCall, expectedDeltaInnerPay1, expectedDeltaInnerPay2
}
// TestScenarioOutcome represents an outcome of a TestScenario
@@ -172,6 +328,10 @@ type TestScenario struct {
FailedAt []uint64
ExpectedEvents []Event
ExpectedSimulationAD transactions.ApplyData
+ ExpectedStateDelta ledgercore.StateDelta
+ AppBudgetAdded uint64
+ AppBudgetConsumed uint64
+ TxnAppBudgetConsumed []uint64
}
// TestScenarioGenerator is a function which instantiates a TestScenario
@@ -196,8 +356,10 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
noFailureName := "none"
noFailure := func(info TestScenarioInfo) TestScenario {
- expectedAD := expectedApplyData(info)
program := fillProgramTemplate("", successInnerProgram, "", 1, 2, "pushint 1")
+ expectedAD, expectedDeltaCallingTxn, expectedDeltaInnerAppCall, expectedDeltaInnerPay1, expectedDeltaInnerPay2 := expectedApplyDataAndStateDelta(info, program, successInnerProgramBytes)
+ expectedDelta := MergeStateDeltas(expectedDeltaCallingTxn, expectedDeltaInnerAppCall, expectedDeltaInnerPay1, expectedDeltaInnerPay2)
+
return TestScenario{
Outcome: ApprovalOutcome,
Program: program,
@@ -219,7 +381,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
{
AfterProgram(logic.ModeApp, false),
AfterTxn(protocol.ApplicationCallTx, expectedAD.EvalDelta.InnerTxns[0].ApplyData, false),
- AfterTxnGroup(1, false), // end first itxn group
+ AfterTxnGroup(1, nil, false), // end first itxn group
AfterOpcode(false),
},
OpcodeEvents(16, false),
@@ -230,7 +392,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
AfterTxn(protocol.PaymentTx, expectedAD.EvalDelta.InnerTxns[1].ApplyData, false),
BeforeTxn(protocol.PaymentTx),
AfterTxn(protocol.PaymentTx, expectedAD.EvalDelta.InnerTxns[2].ApplyData, false),
- AfterTxnGroup(2, false), // end second itxn group
+ AfterTxnGroup(2, nil, false), // end second itxn group
AfterOpcode(false),
},
OpcodeEvents(3, false),
@@ -240,6 +402,10 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
},
}),
ExpectedSimulationAD: expectedAD,
+ ExpectedStateDelta: expectedDelta,
+ AppBudgetAdded: 2100,
+ AppBudgetConsumed: 35,
+ TxnAppBudgetConsumed: []uint64{0, 35},
}
}
@@ -267,8 +433,13 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
beforeInnersName := fmt.Sprintf("before inners,error=%t", shouldError)
beforeInners := func(info TestScenarioInfo) TestScenario {
- expectedAD := expectedApplyData(info)
program := fillProgramTemplate(failureOps, successInnerProgram, "", 1, 2, "pushint 1")
+ expectedAD, expectedDeltaCallingTxn, _, _, _ := expectedApplyDataAndStateDelta(info, program, successInnerProgramBytes)
+ expectedDelta := expectedDeltaCallingTxn
+
+ // remove failed txids from delta
+ expectedDeltaCallingTxn.Txids = nil
+
// EvalDeltas are removed from failed app call transactions
expectedADNoED := expectedAD
expectedADNoED.EvalDelta = transactions.EvalDelta{}
@@ -292,13 +463,22 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
},
}),
ExpectedSimulationAD: expectedAD,
+ ExpectedStateDelta: expectedDelta,
+ AppBudgetAdded: 700,
+ AppBudgetConsumed: 4,
+ TxnAppBudgetConsumed: []uint64{0, 4},
}
}
scenarios[beforeInnersName] = beforeInners
firstInnerName := fmt.Sprintf("first inner,error=%t", shouldError)
firstInner := func(info TestScenarioInfo) TestScenario {
- expectedAD := expectedApplyData(info)
+ program := fillProgramTemplate("", failureInnerProgram, "", 1, 2, "pushint 1")
+ expectedAD, expectedDeltaCallingTxn, _, _, _ := expectedApplyDataAndStateDelta(info, program, failureInnerProgramBytes)
+ expectedDelta := expectedDeltaCallingTxn
+
+ // remove failed txids from delta
+ expectedDeltaCallingTxn.Txids = nil
// EvalDeltas are removed from failed app call transactions
expectedInnerAppCallADNoEvalDelta := expectedAD.EvalDelta.InnerTxns[0].ApplyData
@@ -311,8 +491,6 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
expectedAD.EvalDelta.InnerTxns = expectedAD.EvalDelta.InnerTxns[:1]
expectedAD.EvalDelta.InnerTxns[0].Txn.ApprovalProgram = failureInnerProgramBytes
-
- program := fillProgramTemplate("", failureInnerProgram, "", 1, 2, "pushint 1")
return TestScenario{
Outcome: outcome,
Program: program,
@@ -334,20 +512,30 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
{
AfterProgram(logic.ModeApp, shouldError),
AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallADNoEvalDelta, true),
- AfterTxnGroup(1, true), // end first itxn group
+ AfterTxnGroup(1, nil, true), // end first itxn group
AfterOpcode(true),
AfterProgram(logic.ModeApp, true),
AfterTxn(protocol.ApplicationCallTx, expectedADNoED, true),
},
}),
ExpectedSimulationAD: expectedAD,
+ ExpectedStateDelta: expectedDelta,
+ AppBudgetAdded: 1400,
+ AppBudgetConsumed: 15,
+ TxnAppBudgetConsumed: []uint64{0, 15},
}
}
scenarios[firstInnerName] = firstInner
betweenInnersName := fmt.Sprintf("between inners,error=%t", shouldError)
betweenInners := func(info TestScenarioInfo) TestScenario {
- expectedAD := expectedApplyData(info)
+ program := fillProgramTemplate("", successInnerProgram, failureOps, 1, 2, "pushint 1")
+ expectedAD, expectedDeltaCallingTxn, _, _, _ := expectedApplyDataAndStateDelta(info, program, successInnerProgramBytes)
+ expectedDelta := expectedDeltaCallingTxn
+
+ // remove failed txids from delta
+ expectedDeltaCallingTxn.Txids = nil
+
expectedInnerAppCallAD := expectedAD.EvalDelta.InnerTxns[0].ApplyData
// EvalDeltas are removed from failed app call transactions
@@ -358,8 +546,6 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
expectedAD.EvalDelta.Logs = expectedAD.EvalDelta.Logs[:2]
expectedAD.EvalDelta.InnerTxns = expectedAD.EvalDelta.InnerTxns[:1]
-
- program := fillProgramTemplate("", successInnerProgram, failureOps, 1, 2, "pushint 1")
return TestScenario{
Outcome: outcome,
Program: program,
@@ -381,7 +567,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
{
AfterProgram(logic.ModeApp, false),
AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false),
- AfterTxnGroup(1, false), // end first itxn group
+ AfterTxnGroup(1, nil, false), // end first itxn group
AfterOpcode(false),
},
OpcodeEvents(4, shouldError),
@@ -391,6 +577,10 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
},
}),
ExpectedSimulationAD: expectedAD,
+ ExpectedStateDelta: expectedDelta,
+ AppBudgetAdded: 1400,
+ AppBudgetConsumed: 19,
+ TxnAppBudgetConsumed: []uint64{0, 19},
}
}
scenarios[betweenInnersName] = betweenInners
@@ -398,7 +588,13 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
if shouldError {
secondInnerName := "second inner"
secondInner := func(info TestScenarioInfo) TestScenario {
- expectedAD := expectedApplyData(info)
+ program := fillProgramTemplate("", successInnerProgram, "", math.MaxUint64, 2, "pushint 1")
+ expectedAD, expectedDeltaCallingTxn, _, _, _ := expectedApplyDataAndStateDelta(info, program, successInnerProgramBytes)
+ expectedDelta := expectedDeltaCallingTxn
+
+ // remove failed txids from delta
+ expectedDeltaCallingTxn.Txids = nil
+
expectedInnerAppCallAD := expectedAD.EvalDelta.InnerTxns[0].ApplyData
expectedInnerPay1AD := expectedAD.EvalDelta.InnerTxns[1].ApplyData
@@ -410,8 +606,6 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
expectedAD.EvalDelta.Logs = expectedAD.EvalDelta.Logs[:2]
expectedAD.EvalDelta.InnerTxns[1].Txn.Amount.Raw = math.MaxUint64
-
- program := fillProgramTemplate("", successInnerProgram, "", math.MaxUint64, 2, "pushint 1")
return TestScenario{
Outcome: ErrorOutcome,
Program: program,
@@ -433,7 +627,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
{
AfterProgram(logic.ModeApp, false),
AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false),
- AfterTxnGroup(1, false), // end first itxn group
+ AfterTxnGroup(1, nil, false), // end first itxn group
AfterOpcode(false),
},
OpcodeEvents(16, false),
@@ -442,20 +636,29 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
BeforeTxnGroup(2), // start second itxn group
BeforeTxn(protocol.PaymentTx),
AfterTxn(protocol.PaymentTx, expectedInnerPay1AD, true),
- AfterTxnGroup(2, true), // end second itxn group
+ AfterTxnGroup(2, nil, true), // end second itxn group
AfterOpcode(true),
AfterProgram(logic.ModeApp, true),
AfterTxn(protocol.ApplicationCallTx, expectedADNoED, true),
},
}),
ExpectedSimulationAD: expectedAD,
+ ExpectedStateDelta: expectedDelta,
+ AppBudgetAdded: 2100,
+ AppBudgetConsumed: 32,
+ TxnAppBudgetConsumed: []uint64{0, 32},
}
}
scenarios[secondInnerName] = secondInner
thirdInnerName := "third inner"
thirdInner := func(info TestScenarioInfo) TestScenario {
- expectedAD := expectedApplyData(info)
+ program := fillProgramTemplate("", successInnerProgram, "", 1, math.MaxUint64, "pushint 1")
+ expectedAD, expectedDeltaCallingTxn, _, _, _ := expectedApplyDataAndStateDelta(info, program, successInnerProgramBytes)
+ expectedDelta := expectedDeltaCallingTxn
+
+ // remove failed txids from delta
+ expectedDeltaCallingTxn.Txids = nil
expectedInnerAppCallAD := expectedAD.EvalDelta.InnerTxns[0].ApplyData
expectedInnerPay1AD := expectedAD.EvalDelta.InnerTxns[1].ApplyData
@@ -469,8 +672,6 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
expectedAD.EvalDelta.Logs = expectedAD.EvalDelta.Logs[:2]
expectedAD.EvalDelta.InnerTxns[2].Txn.Amount.Raw = math.MaxUint64
-
- program := fillProgramTemplate("", successInnerProgram, "", 1, math.MaxUint64, "pushint 1")
return TestScenario{
Outcome: ErrorOutcome,
Program: program,
@@ -492,7 +693,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
{
AfterProgram(logic.ModeApp, false),
AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false),
- AfterTxnGroup(1, false), // end first itxn group
+ AfterTxnGroup(1, nil, false), // end first itxn group
AfterOpcode(false),
},
OpcodeEvents(16, false),
@@ -503,13 +704,17 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
AfterTxn(protocol.PaymentTx, expectedInnerPay1AD, false),
BeforeTxn(protocol.PaymentTx),
AfterTxn(protocol.PaymentTx, expectedInnerPay2AD, true),
- AfterTxnGroup(2, true), // end second itxn group
+ AfterTxnGroup(2, nil, true), // end second itxn group
AfterOpcode(true),
AfterProgram(logic.ModeApp, true),
AfterTxn(protocol.ApplicationCallTx, expectedADNoED, true),
},
}),
ExpectedSimulationAD: expectedAD,
+ ExpectedStateDelta: expectedDelta,
+ AppBudgetAdded: 2100,
+ AppBudgetConsumed: 32,
+ TxnAppBudgetConsumed: []uint64{0, 32},
}
}
scenarios[thirdInnerName] = thirdInner
@@ -517,14 +722,19 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
afterInnersName := fmt.Sprintf("after inners,error=%t", shouldError)
afterInners := func(info TestScenarioInfo) TestScenario {
- expectedAD := expectedApplyData(info)
+ program := fillProgramTemplate("", successInnerProgram, "", 1, 2, singleFailureOp)
+ expectedAD, expectedDeltaCallingTxn, _, _, _ := expectedApplyDataAndStateDelta(info, program, successInnerProgramBytes)
+ expectedDelta := expectedDeltaCallingTxn
+
+ // remove failed txids from delta
+ expectedDeltaCallingTxn.Txids = nil
+
expectedInnerAppCallAD := expectedAD.EvalDelta.InnerTxns[0].ApplyData
expectedInnerPay1AD := expectedAD.EvalDelta.InnerTxns[1].ApplyData
expectedInnerPay2AD := expectedAD.EvalDelta.InnerTxns[2].ApplyData
// EvalDeltas are removed from failed app call transactions
expectedADNoED := expectedAD
expectedADNoED.EvalDelta = transactions.EvalDelta{}
- program := fillProgramTemplate("", successInnerProgram, "", 1, 2, singleFailureOp)
return TestScenario{
Outcome: outcome,
Program: program,
@@ -546,7 +756,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
{
AfterProgram(logic.ModeApp, false),
AfterTxn(protocol.ApplicationCallTx, expectedInnerAppCallAD, false),
- AfterTxnGroup(1, false), // end first itxn group
+ AfterTxnGroup(1, nil, false), // end first itxn group
AfterOpcode(false),
},
OpcodeEvents(16, false),
@@ -557,7 +767,7 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
AfterTxn(protocol.PaymentTx, expectedInnerPay1AD, false),
BeforeTxn(protocol.PaymentTx),
AfterTxn(protocol.PaymentTx, expectedInnerPay2AD, false),
- AfterTxnGroup(2, false), // end second itxn group
+ AfterTxnGroup(2, nil, false), // end second itxn group
AfterOpcode(false),
},
OpcodeEvents(3, shouldError),
@@ -567,6 +777,10 @@ func GetTestScenarios() map[string]TestScenarioGenerator {
},
}),
ExpectedSimulationAD: expectedAD,
+ ExpectedStateDelta: expectedDelta,
+ AppBudgetAdded: 2100,
+ AppBudgetConsumed: 35,
+ TxnAppBudgetConsumed: []uint64{0, 35},
}
}
scenarios[afterInnersName] = afterInners
@@ -590,3 +804,40 @@ func StripInnerTxnGroupIDsFromEvents(events []Event) []Event {
}
return events
}
+
+// MergeStateDeltas merges multiple state deltas into one. The arguments are not modified, but the
+// first delta is used to populate non-mergeable fields in the result.
+func MergeStateDeltas(deltas ...ledgercore.StateDelta) ledgercore.StateDelta {
+ if len(deltas) == 0 {
+ return ledgercore.StateDelta{}
+ }
+
+ result := ledgercore.StateDelta{
+ // copy basic fields from the first delta
+ Hdr: deltas[0].Hdr,
+ StateProofNext: deltas[0].StateProofNext,
+ PrevTimestamp: deltas[0].PrevTimestamp,
+ Totals: deltas[0].Totals,
+
+ // initialize structure for later use
+ Txids: make(map[transactions.Txid]ledgercore.IncludedTransactions),
+ }
+ for _, delta := range deltas {
+ result.Accts.MergeAccounts(delta.Accts)
+ for key, delta := range delta.KvMods {
+ result.AddKvMod(key, delta)
+ }
+ for id, delta := range delta.Creatables {
+ result.AddCreatable(id, delta)
+ }
+ txidBase := uint64(len(result.Txids))
+ for txid, includedTx := range delta.Txids {
+ includedTx.Intra += txidBase
+ result.Txids[txid] = includedTx
+ }
+ for lease, round := range delta.Txleases {
+ result.Txleases[lease] = round
+ }
+ }
+ return result
+}
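The Intra renumbering in the txid loop is the subtle part of MergeStateDeltas: txids contributed by later deltas are re-indexed after those already merged. A hypothetical example-style test illustrating this (types as declared in ledgercore; values made up):

```go
package mocktracer_test

import (
	"fmt"

	"github.com/algorand/go-algorand/data/transactions"
	"github.com/algorand/go-algorand/data/transactions/logic/mocktracer"
	"github.com/algorand/go-algorand/ledger/ledgercore"
)

func Example_mergeStateDeltas() {
	d1 := ledgercore.StateDelta{Txids: map[transactions.Txid]ledgercore.IncludedTransactions{
		{1}: {LastValid: 10, Intra: 0},
	}}
	d2 := ledgercore.StateDelta{Txids: map[transactions.Txid]ledgercore.IncludedTransactions{
		{2}: {LastValid: 10, Intra: 0},
	}}
	merged := mocktracer.MergeStateDeltas(d1, d2)
	fmt.Println(merged.Txids[transactions.Txid{2}].Intra) // re-indexed after d1's one txid
	// Output: 1
}
```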
diff --git a/data/transactions/logic/mocktracer/tracer.go b/data/transactions/logic/mocktracer/tracer.go
index 2428d022e..13a6d92d7 100644
--- a/data/transactions/logic/mocktracer/tracer.go
+++ b/data/transactions/logic/mocktracer/tracer.go
@@ -17,15 +17,24 @@
package mocktracer
import (
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
// EventType represents a type of logic.EvalTracer event
type EventType string
const (
+ // BeforeBlockEvent represents the logic.EvalTracer.BeforeBlock event
+ BeforeBlockEvent EventType = "BeforeBlock"
// BeforeTxnGroupEvent represents the logic.EvalTracer.BeforeTxnGroup event
BeforeTxnGroupEvent EventType = "BeforeTxnGroup"
// AfterTxnGroupEvent represents the logic.EvalTracer.AfterTxnGroup event
@@ -42,6 +51,8 @@ const (
BeforeOpcodeEvent EventType = "BeforeOpcode"
// AfterOpcodeEvent represents the logic.EvalTracer.AfterOpcode event
AfterOpcodeEvent EventType = "AfterOpcode"
+ // AfterBlockEvent represents the logic.EvalTracer.AfterBlock event
+ AfterBlockEvent EventType = "AfterBlock"
)
// Event represents a logic.EvalTracer event
@@ -57,11 +68,22 @@ type Event struct {
// only for AfterTxn
TxnApplyData transactions.ApplyData
+ // only for AfterTxnGroup and AfterTxn
+ Deltas *ledgercore.StateDelta
+
// only for BeforeTxnGroup and AfterTxnGroup
GroupSize int
// only for AfterOpcode, AfterProgram, AfterTxn, and AfterTxnGroup
HasError bool
+
+ // only for BeforeBlock, AfterBlock
+ Round basics.Round
+}
+
+// BeforeBlock creates a new Event with the type BeforeBlockEvent for a particular round
+func BeforeBlock(round basics.Round) Event {
+ return Event{Type: BeforeBlockEvent, Round: round}
}
// BeforeTxnGroup creates a new Event with the type BeforeTxnGroupEvent
@@ -70,8 +92,8 @@ func BeforeTxnGroup(groupSize int) Event {
}
// AfterTxnGroup creates a new Event with the type AfterTxnGroupEvent
-func AfterTxnGroup(groupSize int, hasError bool) Event {
- return Event{Type: AfterTxnGroupEvent, GroupSize: groupSize, HasError: hasError}
+func AfterTxnGroup(groupSize int, deltas *ledgercore.StateDelta, hasError bool) Event {
+ return Event{Type: AfterTxnGroupEvent, GroupSize: groupSize, Deltas: copyDeltas(deltas), HasError: hasError}
}
// BeforeProgram creates a new Event with the type BeforeProgramEvent
@@ -104,6 +126,11 @@ func AfterOpcode(hasError bool) Event {
return Event{Type: AfterOpcodeEvent, HasError: hasError}
}
+// AfterBlock creates a new Event with the type AfterBlockEvent
+func AfterBlock(round basics.Round) Event {
+ return Event{Type: AfterBlockEvent, Round: round}
+}
+
// OpcodeEvents returns a slice of events that represent calling `count` opcodes
func OpcodeEvents(count int, endsWithError bool) []Event {
events := make([]Event, 0, count*2)
@@ -131,14 +158,19 @@ type Tracer struct {
Events []Event
}
+// BeforeBlock mocks the logic.EvalTracer.BeforeBlock method
+func (d *Tracer) BeforeBlock(hdr *bookkeeping.BlockHeader) {
+ d.Events = append(d.Events, BeforeBlock(hdr.Round))
+}
+
// BeforeTxnGroup mocks the logic.EvalTracer.BeforeTxnGroup method
func (d *Tracer) BeforeTxnGroup(ep *logic.EvalParams) {
d.Events = append(d.Events, BeforeTxnGroup(len(ep.TxnGroup)))
}
// AfterTxnGroup mocks the logic.EvalTracer.AfterTxnGroup method
-func (d *Tracer) AfterTxnGroup(ep *logic.EvalParams, evalError error) {
- d.Events = append(d.Events, AfterTxnGroup(len(ep.TxnGroup), evalError != nil))
+func (d *Tracer) AfterTxnGroup(ep *logic.EvalParams, deltas *ledgercore.StateDelta, evalError error) {
+ d.Events = append(d.Events, AfterTxnGroup(len(ep.TxnGroup), deltas, evalError != nil))
}
// BeforeTxn mocks the logic.EvalTracer.BeforeTxn method
@@ -170,3 +202,50 @@ func (d *Tracer) BeforeOpcode(cx *logic.EvalContext) {
func (d *Tracer) AfterOpcode(cx *logic.EvalContext, evalError error) {
d.Events = append(d.Events, AfterOpcode(evalError != nil))
}
+
+// AfterBlock mocks the logic.EvalTracer.AfterBlock method
+func (d *Tracer) AfterBlock(hdr *bookkeeping.BlockHeader) {
+ d.Events = append(d.Events, AfterBlock(hdr.Round))
+}
+
+// copyDeltas makes a deep copy of the given ledgercore.StateDelta pointer, if it's not nil.
+// This is inefficient, but it should only be used for testing.
+func copyDeltas(deltas *ledgercore.StateDelta) *ledgercore.StateDelta {
+ if deltas == nil {
+ return nil
+ }
+ encoded := protocol.EncodeReflect(deltas)
+ var clone ledgercore.StateDelta
+ err := protocol.DecodeReflect(encoded, &clone)
+ if err != nil {
+ panic(err)
+ }
+ return &clone
+}
+
+// AssertEventsEqual asserts that two slices of Events are equal, taking into account complex
+// equality of StateDeltas. The arguments will be modified in-place to normalize any StateDeltas.
+func AssertEventsEqual(t *testing.T, expected, actual []Event) {
+ t.Helper()
+
+ // Dehydrate deltas for better comparison
+ for i := range expected {
+ if expected[i].Deltas != nil {
+ expected[i].Deltas.Dehydrate()
+ }
+ }
+ for i := range actual {
+ if actual[i].Deltas != nil {
+ actual[i].Deltas.Dehydrate()
+ }
+ }
+
+ // These extra checks are not necessary for correctness, but they provide more targeted information on failure
+ if assert.Equal(t, len(expected), len(actual)) {
+ for i := range expected {
+ assert.Equal(t, expected[i].Deltas, actual[i].Deltas, "StateDelta disagreement: i=%d, expected event type: %v, actual event type: %v", i, expected[i].Type, actual[i].Type)
+ }
+ }
+
+ require.Equal(t, expected, actual)
+}
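A sketch of the intended call pattern for AssertEventsEqual; the test body is hypothetical but uses only constructors defined in this file:

```go
package mocktracer_test

import (
	"testing"

	"github.com/algorand/go-algorand/data/transactions/logic/mocktracer"
)

func TestAssertEventsEqualUsage(t *testing.T) {
	tracer := mocktracer.Tracer{}
	tracer.AfterOpcode(nil, nil) // record one AfterOpcode event, no error
	mocktracer.AssertEventsEqual(t, []mocktracer.Event{mocktracer.AfterOpcode(false)}, tracer.Events)
}
```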
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index 5dd6f20d1..90c52f086 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -24,7 +24,7 @@ import (
)
// LogicVersion defines default assembler and max eval versions
-const LogicVersion = 9
+const LogicVersion = 10
// rekeyingEnabledVersion is the version of TEAL where RekeyTo functionality
// was enabled. This is important to remember so that old TEAL accounts cannot
@@ -66,10 +66,12 @@ const fidoVersion = 7 // base64, json, secp256r1
const randomnessVersion = 7 // vrf_verify, block
const fpVersion = 8 // changes for frame pointers and simpler function discipline
+const sharedResourcesVersion = 9 // apps can access resources from other transactions.
+
// EXPERIMENTAL. These should be revisited whenever a new LogicSigVersion is
// moved from vFuture to a new consensus version. If they remain unready, bump
// their version, and fixup TestAssemble() in assembler_test.go.
-const pairingVersion = 9 // bn256 opcodes. will add bls12-381, and unify the available opcodes.
+const pairingVersion = 10 // bn256 opcodes. will add bls12-381, and unify the available opcodes.
// Unlimited Global Storage opcodes
const boxVersion = 8 // box_*
@@ -317,6 +319,28 @@ const (
immLabels
)
+func (ik immKind) String() string {
+ switch ik {
+ case immByte:
+ return "uint8"
+ case immInt8:
+ return "int8"
+ case immLabel:
+ return "int16 (big-endian)"
+ case immInt:
+ return "varuint"
+ case immBytes:
+ return "varuint length, bytes"
+ case immInts:
+ return fmt.Sprintf("varuint count, [%s ...]", immInt.String())
+ case immBytess: // "ss" not a typo. Multiple "bytes"
+ return fmt.Sprintf("varuint count, [%s ...]", immBytes.String())
+ case immLabels:
+ return fmt.Sprintf("varuint count, [%s ...]", immLabel.String())
+ }
+ return "unknown"
+}
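+
+// For example, immBytess.String() renders as
+// "varuint count, [varuint length, bytes ...]", composing the immBytes
+// encoding into the repeated form.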
+
type immediate struct {
Name string
kind immKind
@@ -377,7 +401,7 @@ type OpSpec struct {
// AlwaysExits is true iff the opcode always ends the program.
func (spec *OpSpec) AlwaysExits() bool {
- return len(spec.Return.Types) == 1 && spec.Return.Types[0] == StackNone
+ return len(spec.Return.Types) == 1 && spec.Return.Types[0].AVMType == avmNone
}
func (spec *OpSpec) deadens() bool {
@@ -397,17 +421,17 @@ func (spec *OpSpec) deadens() bool {
// assembly-time, with ops.returns()
var OpSpecs = []OpSpec{
{0x00, "err", opErr, proto(":x"), 1, detDefault()},
- {0x01, "sha256", opSHA256, proto("b:b"), 1, costly(7)},
- {0x02, "keccak256", opKeccak256, proto("b:b"), 1, costly(26)},
- {0x03, "sha512_256", opSHA512_256, proto("b:b"), 1, costly(9)},
+ {0x01, "sha256", opSHA256, proto("b:H"), 1, costly(7)},
+ {0x02, "keccak256", opKeccak256, proto("b:H"), 1, costly(26)},
+ {0x03, "sha512_256", opSHA512_256, proto("b:H"), 1, costly(9)},
// Cost of these opcodes increases in AVM version 2 based on measured
// performance. Should be able to run max hashes during stateful TEAL
// and achieve reasonable TPS. Same opcode for different versions
// is OK.
- {0x01, "sha256", opSHA256, proto("b:b"), 2, costly(35)},
- {0x02, "keccak256", opKeccak256, proto("b:b"), 2, costly(130)},
- {0x03, "sha512_256", opSHA512_256, proto("b:b"), 2, costly(45)},
+ {0x01, "sha256", opSHA256, proto("b:H"), 2, costly(35)},
+ {0x02, "keccak256", opKeccak256, proto("b:H"), 2, costly(130)},
+ {0x03, "sha512_256", opSHA512_256, proto("b:H"), 2, costly(45)},
/*
Tabling these changes until we offer unlimited global storage as there
@@ -419,10 +443,10 @@ var OpSpecs = []OpSpec{
{0x03, "sha512_256", opSHA512_256, proto("b:b"), 7, unlimitedStorage, costByLength(17, 5, 8)},
*/
- {0x04, "ed25519verify", opEd25519Verify, proto("bbb:i"), 1, costly(1900).only(ModeSig)},
- {0x04, "ed25519verify", opEd25519Verify, proto("bbb:i"), 5, costly(1900)},
+ {0x04, "ed25519verify", opEd25519Verify, proto("bbb:T"), 1, costly(1900).only(ModeSig)},
+ {0x04, "ed25519verify", opEd25519Verify, proto("bbb:T"), 5, costly(1900)},
- {0x05, "ecdsa_verify", opEcdsaVerify, proto("bbbbb:i"), 5, costByField("v", &EcdsaCurves, ecdsaVerifyCosts)},
+ {0x05, "ecdsa_verify", opEcdsaVerify, proto("bbbbb:T"), 5, costByField("v", &EcdsaCurves, ecdsaVerifyCosts)},
{0x06, "ecdsa_pk_decompress", opEcdsaPkDecompress, proto("b:bb"), 5, costByField("v", &EcdsaCurves, ecdsaDecompressCosts)},
{0x07, "ecdsa_pk_recover", opEcdsaPkRecover, proto("bibb:bb"), 5, field("v", &EcdsaCurves).costs(2000)},
@@ -430,14 +454,14 @@ var OpSpecs = []OpSpec{
{0x09, "-", opMinus, proto("ii:i"), 1, detDefault()},
{0x0a, "/", opDiv, proto("ii:i"), 1, detDefault()},
{0x0b, "*", opMul, proto("ii:i"), 1, detDefault()},
- {0x0c, "<", opLt, proto("ii:i"), 1, detDefault()},
- {0x0d, ">", opGt, proto("ii:i"), 1, detDefault()},
- {0x0e, "<=", opLe, proto("ii:i"), 1, detDefault()},
- {0x0f, ">=", opGe, proto("ii:i"), 1, detDefault()},
- {0x10, "&&", opAnd, proto("ii:i"), 1, detDefault()},
- {0x11, "||", opOr, proto("ii:i"), 1, detDefault()},
- {0x12, "==", opEq, proto("aa:i"), 1, typed(typeEquals)},
- {0x13, "!=", opNeq, proto("aa:i"), 1, typed(typeEquals)},
+ {0x0c, "<", opLt, proto("ii:T"), 1, detDefault()},
+ {0x0d, ">", opGt, proto("ii:T"), 1, detDefault()},
+ {0x0e, "<=", opLe, proto("ii:T"), 1, detDefault()},
+ {0x0f, ">=", opGe, proto("ii:T"), 1, detDefault()},
+ {0x10, "&&", opAnd, proto("ii:T"), 1, detDefault()},
+ {0x11, "||", opOr, proto("ii:T"), 1, detDefault()},
+ {0x12, "==", opEq, proto("aa:T"), 1, typed(typeEquals)},
+ {0x13, "!=", opNeq, proto("aa:T"), 1, typed(typeEquals)},
{0x14, "!", opNot, proto("i:i"), 1, detDefault()},
{0x15, "len", opLen, proto("b:i"), 1, detDefault()},
{0x16, "itob", opItob, proto("i:b"), 1, detDefault()},
@@ -528,26 +552,25 @@ var OpSpecs = []OpSpec{
{0x60, "balance", opBalance, proto("i:i"), 2, only(ModeApp)},
{0x60, "balance", opBalance, proto("a:i"), directRefEnabledVersion, only(ModeApp)},
- {0x61, "app_opted_in", opAppOptedIn, proto("ii:i"), 2, only(ModeApp)},
- {0x61, "app_opted_in", opAppOptedIn, proto("ai:i"), directRefEnabledVersion, only(ModeApp)},
+ {0x61, "app_opted_in", opAppOptedIn, proto("ii:T"), 2, only(ModeApp)},
+ {0x61, "app_opted_in", opAppOptedIn, proto("ai:T"), directRefEnabledVersion, only(ModeApp)},
{0x62, "app_local_get", opAppLocalGet, proto("ib:a"), 2, only(ModeApp)},
{0x62, "app_local_get", opAppLocalGet, proto("ab:a"), directRefEnabledVersion, only(ModeApp)},
- {0x63, "app_local_get_ex", opAppLocalGetEx, proto("iib:ai"), 2, only(ModeApp)},
- {0x63, "app_local_get_ex", opAppLocalGetEx, proto("aib:ai"), directRefEnabledVersion, only(ModeApp)},
+ {0x63, "app_local_get_ex", opAppLocalGetEx, proto("iib:aT"), 2, only(ModeApp)},
+ {0x63, "app_local_get_ex", opAppLocalGetEx, proto("aib:aT"), directRefEnabledVersion, only(ModeApp)},
{0x64, "app_global_get", opAppGlobalGet, proto("b:a"), 2, only(ModeApp)},
- {0x65, "app_global_get_ex", opAppGlobalGetEx, proto("ib:ai"), 2, only(ModeApp)},
+ {0x65, "app_global_get_ex", opAppGlobalGetEx, proto("ib:aT"), 2, only(ModeApp)},
{0x66, "app_local_put", opAppLocalPut, proto("iba:"), 2, only(ModeApp)},
{0x66, "app_local_put", opAppLocalPut, proto("aba:"), directRefEnabledVersion, only(ModeApp)},
{0x67, "app_global_put", opAppGlobalPut, proto("ba:"), 2, only(ModeApp)},
{0x68, "app_local_del", opAppLocalDel, proto("ib:"), 2, only(ModeApp)},
{0x68, "app_local_del", opAppLocalDel, proto("ab:"), directRefEnabledVersion, only(ModeApp)},
{0x69, "app_global_del", opAppGlobalDel, proto("b:"), 2, only(ModeApp)},
-
- {0x70, "asset_holding_get", opAssetHoldingGet, proto("ii:ai"), 2, field("f", &AssetHoldingFields).only(ModeApp)},
- {0x70, "asset_holding_get", opAssetHoldingGet, proto("ai:ai"), directRefEnabledVersion, field("f", &AssetHoldingFields).only(ModeApp)},
- {0x71, "asset_params_get", opAssetParamsGet, proto("i:ai"), 2, field("f", &AssetParamsFields).only(ModeApp)},
- {0x72, "app_params_get", opAppParamsGet, proto("i:ai"), 5, field("f", &AppParamsFields).only(ModeApp)},
- {0x73, "acct_params_get", opAcctParamsGet, proto("a:ai"), 6, field("f", &AcctParamsFields).only(ModeApp)},
+ {0x70, "asset_holding_get", opAssetHoldingGet, proto("ii:aT"), 2, field("f", &AssetHoldingFields).only(ModeApp)},
+ {0x70, "asset_holding_get", opAssetHoldingGet, proto("ai:aT"), directRefEnabledVersion, field("f", &AssetHoldingFields).only(ModeApp)},
+ {0x71, "asset_params_get", opAssetParamsGet, proto("i:aT"), 2, field("f", &AssetParamsFields).only(ModeApp)},
+ {0x72, "app_params_get", opAppParamsGet, proto("i:aT"), 5, field("f", &AppParamsFields).only(ModeApp)},
+ {0x73, "acct_params_get", opAcctParamsGet, proto("a:aT"), 6, field("f", &AcctParamsFields).only(ModeApp)},
{0x78, "min_balance", opMinBalance, proto("i:i"), 3, only(ModeApp)},
{0x78, "min_balance", opMinBalance, proto("a:i"), directRefEnabledVersion, only(ModeApp)},
@@ -558,7 +581,7 @@ var OpSpecs = []OpSpec{
{0x82, "pushbytess", opPushBytess, proto(":", "", "[N items]"), 8, constants(asmPushBytess, checkByteImmArgs, "bytes ...", immBytess).typed(typePushBytess).trust()},
{0x83, "pushints", opPushInts, proto(":", "", "[N items]"), 8, constants(asmPushInts, checkIntImmArgs, "uint ...", immInts).typed(typePushInts).trust()},
- {0x84, "ed25519verify_bare", opEd25519VerifyBare, proto("bbb:i"), 7, costly(1900)},
+ {0x84, "ed25519verify_bare", opEd25519VerifyBare, proto("bbb:T"), 7, costly(1900)},
// "Function oriented"
{0x88, "callsub", opCallSub, proto(":"), 4, detBranch()},
@@ -589,22 +612,22 @@ var OpSpecs = []OpSpec{
{0x9b, "bn256_pairing", opBn256Pairing, proto("bb:i"), pairingVersion, costly(8700)},
// Byteslice math.
- {0xa0, "b+", opBytesPlus, proto("bb:b"), 4, costly(10)},
- {0xa1, "b-", opBytesMinus, proto("bb:b"), 4, costly(10)},
- {0xa2, "b/", opBytesDiv, proto("bb:b"), 4, costly(20)},
- {0xa3, "b*", opBytesMul, proto("bb:b"), 4, costly(20)},
- {0xa4, "b<", opBytesLt, proto("bb:i"), 4, detDefault()},
- {0xa5, "b>", opBytesGt, proto("bb:i"), 4, detDefault()},
- {0xa6, "b<=", opBytesLe, proto("bb:i"), 4, detDefault()},
- {0xa7, "b>=", opBytesGe, proto("bb:i"), 4, detDefault()},
- {0xa8, "b==", opBytesEq, proto("bb:i"), 4, detDefault()},
- {0xa9, "b!=", opBytesNeq, proto("bb:i"), 4, detDefault()},
+ {0xa0, "b+", opBytesPlus, proto("II:b"), 4, costly(10).typed(typeByteMath(maxByteMathSize + 1))},
+ {0xa1, "b-", opBytesMinus, proto("II:I"), 4, costly(10)},
+ {0xa2, "b/", opBytesDiv, proto("II:I"), 4, costly(20)},
+ {0xa3, "b*", opBytesMul, proto("II:b"), 4, costly(20).typed(typeByteMath(maxByteMathSize * 2))},
+ {0xa4, "b<", opBytesLt, proto("II:T"), 4, detDefault()},
+ {0xa5, "b>", opBytesGt, proto("II:T"), 4, detDefault()},
+ {0xa6, "b<=", opBytesLe, proto("II:T"), 4, detDefault()},
+ {0xa7, "b>=", opBytesGe, proto("II:T"), 4, detDefault()},
+ {0xa8, "b==", opBytesEq, proto("II:T"), 4, detDefault()},
+ {0xa9, "b!=", opBytesNeq, proto("II:T"), 4, detDefault()},
{0xaa, "b%", opBytesModulo, proto("bb:b"), 4, costly(20)},
{0xab, "b|", opBytesBitOr, proto("bb:b"), 4, costly(6)},
{0xac, "b&", opBytesBitAnd, proto("bb:b"), 4, costly(6)},
{0xad, "b^", opBytesBitXor, proto("bb:b"), 4, costly(6)},
{0xae, "b~", opBytesBitNot, proto("b:b"), 4, costly(4)},
- {0xaf, "bzero", opBytesZero, proto("i:b"), 4, detDefault()},
+ {0xaf, "bzero", opBytesZero, proto("i:b"), 4, detDefault().typed(typeBzero)},
// AVM "effects"
{0xb0, "log", opLog, proto("b:"), 5, only(ModeApp)},
@@ -618,13 +641,13 @@ var OpSpecs = []OpSpec{
{0xb8, "gitxna", opGitxna, proto(":a"), 6, immediates("t", "f", "i").field("f", &TxnArrayFields).only(ModeApp)},
// Unlimited Global Storage - Boxes
- {0xb9, "box_create", opBoxCreate, proto("bi:i"), boxVersion, only(ModeApp)},
- {0xba, "box_extract", opBoxExtract, proto("bii:b"), boxVersion, only(ModeApp)},
- {0xbb, "box_replace", opBoxReplace, proto("bib:"), boxVersion, only(ModeApp)},
- {0xbc, "box_del", opBoxDel, proto("b:i"), boxVersion, only(ModeApp)},
- {0xbd, "box_len", opBoxLen, proto("b:ii"), boxVersion, only(ModeApp)},
- {0xbe, "box_get", opBoxGet, proto("b:bi"), boxVersion, only(ModeApp)},
- {0xbf, "box_put", opBoxPut, proto("bb:"), boxVersion, only(ModeApp)},
+ {0xb9, "box_create", opBoxCreate, proto("Ni:T"), boxVersion, only(ModeApp)},
+ {0xba, "box_extract", opBoxExtract, proto("Nii:b"), boxVersion, only(ModeApp)},
+ {0xbb, "box_replace", opBoxReplace, proto("Nib:"), boxVersion, only(ModeApp)},
+ {0xbc, "box_del", opBoxDel, proto("N:T"), boxVersion, only(ModeApp)},
+ {0xbd, "box_len", opBoxLen, proto("N:iT"), boxVersion, only(ModeApp)},
+ {0xbe, "box_get", opBoxGet, proto("N:bT"), boxVersion, only(ModeApp)},
+ {0xbf, "box_put", opBoxPut, proto("Nb:"), boxVersion, only(ModeApp)},
// Dynamic indexing
{0xc0, "txnas", opTxnas, proto("i:a"), 5, field("f", &TxnArrayFields)},
@@ -636,7 +659,7 @@ var OpSpecs = []OpSpec{
{0xc6, "gitxnas", opGitxnas, proto("i:a"), 6, immediates("t", "f").field("f", &TxnArrayFields).only(ModeApp)},
// randomness support
- {0xd0, "vrf_verify", opVrfVerify, proto("bbb:bi"), randomnessVersion, field("s", &VrfStandards).costs(5700)},
+ {0xd0, "vrf_verify", opVrfVerify, proto("bbb:bT"), randomnessVersion, field("s", &VrfStandards).costs(5700)},
{0xd1, "block", opBlock, proto("i:a"), randomnessVersion, field("f", &BlockFields)},
}
diff --git a/data/transactions/logic/resources.go b/data/transactions/logic/resources.go
new file mode 100644
index 000000000..00045100e
--- /dev/null
+++ b/data/transactions/logic/resources.go
@@ -0,0 +1,364 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic
+
+import (
+ "fmt"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// resources contains a catalog of available resources. It's used to track the
+// apps, assets, and boxes that are available to a transaction, outside the
+// direct foreign array mechanism.
+type resources struct {
+ // These resources were created previously in the group, so they can be used
+ // by later transactions.
+ createdAsas map[basics.AssetIndex]struct{}
+ createdApps map[basics.AppIndex]struct{}
+
+ // These resources have been used by some txn in the group, so they are
+ // available. These maps track the availability of the basic objects (often
+ // called "params"), not the "cross-product" objects (which are tracked
+ // below)
+ sharedAccounts map[basics.Address]struct{}
+ sharedAsas map[basics.AssetIndex]struct{}
+ sharedApps map[basics.AppIndex]struct{}
+ // We need to carefully track the "cross-product" availability, because if
+ // tx0 mentions an account A, and tx1 mentions an ASA X, that does _not_
+ // make the holding AX available
+ sharedHoldings map[ledgercore.AccountAsset]struct{}
+ sharedLocals map[ledgercore.AccountApp]struct{}
+
+ // boxes are all of the top-level box refs from the txgroup. Most are added
+ // during NewEvalParams(). refs using 0 on an appl create are resolved and
+ // added when the appl executes. The boolean value indicates the "dirtiness"
+ // of the box - has it been modified in this txngroup? If yes, the size of
+ // the box counts against the group writeBudget. So delete is NOT a dirtying
+ // operation.
+ boxes map[boxRef]bool
+
+ // dirtyBytes maintains a running count of the number of dirty bytes in `boxes`
+ dirtyBytes uint64
+}
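+
+// A concrete illustration of the cross-product rule (exposition only): if
+// tx0 lists account A in its Accounts and tx1 lists ASA X in its
+// ForeignAssets, then sharedAccounts[A] and sharedAsas[X] are both set, yet
+// the holding (A, X) stays unavailable unless some single transaction paired
+// them, via shareHolding or shareAccountAndHolding below.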
+
+func (r *resources) shareHolding(addr basics.Address, id basics.AssetIndex) {
+ r.sharedHoldings[ledgercore.AccountAsset{Address: addr, Asset: id}] = struct{}{}
+}
+
+func (r *resources) shareAccountAndHolding(addr basics.Address, id basics.AssetIndex) {
+ r.sharedAccounts[addr] = struct{}{}
+ if id != 0 {
+ r.sharedHoldings[ledgercore.AccountAsset{Address: addr, Asset: id}] = struct{}{}
+ }
+}
+
+func (r *resources) shareLocal(addr basics.Address, id basics.AppIndex) {
+ r.sharedLocals[ledgercore.AccountApp{Address: addr, App: id}] = struct{}{}
+}
+
+// In the fill* and allows* routines, we pass the header and the fields in
+// separately, even though they are pointers into the same structure. That
+// prevents dumb attempts to use other fields from the transaction.
+
+func (r *resources) fill(tx *transactions.Transaction, ep *EvalParams) {
+ switch tx.Type {
+ case protocol.PaymentTx:
+ r.fillPayment(&tx.Header, &tx.PaymentTxnFields)
+ case protocol.KeyRegistrationTx:
+ r.fillKeyRegistration(&tx.Header)
+ case protocol.AssetConfigTx:
+ r.fillAssetConfig(&tx.Header, &tx.AssetConfigTxnFields)
+ case protocol.AssetTransferTx:
+ r.fillAssetTransfer(&tx.Header, &tx.AssetTransferTxnFields)
+ case protocol.AssetFreezeTx:
+ r.fillAssetFreeze(&tx.Header, &tx.AssetFreezeTxnFields)
+ case protocol.ApplicationCallTx:
+ r.fillApplicationCall(ep, &tx.Header, &tx.ApplicationCallTxnFields)
+ case protocol.StateProofTx:
+ // state proof txns add nothing to availability (they can't even appear
+ // in a group with an appl. but still.)
+ default:
+ panic(tx.Type)
+ }
+}
+
+func (cx *EvalContext) allows(tx *transactions.Transaction, calleeVer uint64) error {
+ // if the caller is pre-sharing, it can't prepare transactions with
+ // resources that are not available, so `tx` is surely legal.
+ if cx.version < sharedResourcesVersion {
+ // this is important, not just an optimization, because a pre-sharing
+ // creation txn has access to the app and app account it is currently
+ // creating (and therefore can pass that access down), but cx.available
+ // doesn't track that properly until v9's protocol upgrade. See
+ // TestInnerAppCreateAndOptin for an example.
+ return nil
+ }
+ switch tx.Type {
+ case protocol.PaymentTx, protocol.KeyRegistrationTx, protocol.AssetConfigTx:
+ // these transactions don't touch cross-product resources, so no error is possible
+ return nil
+ case protocol.AssetTransferTx:
+ return cx.allowsAssetTransfer(&tx.Header, &tx.AssetTransferTxnFields)
+ case protocol.AssetFreezeTx:
+ return cx.allowsAssetFreeze(&tx.Header, &tx.AssetFreezeTxnFields)
+ case protocol.ApplicationCallTx:
+ return cx.allowsApplicationCall(&tx.Header, &tx.ApplicationCallTxnFields, calleeVer)
+ default:
+ return fmt.Errorf("unknown inner transaction type %s", tx.Type)
+ }
+}
+
+func (r *resources) fillKeyRegistration(hdr *transactions.Header) {
+ r.sharedAccounts[hdr.Sender] = struct{}{}
+}
+
+func (r *resources) fillPayment(hdr *transactions.Header, tx *transactions.PaymentTxnFields) {
+ r.sharedAccounts[hdr.Sender] = struct{}{}
+ r.sharedAccounts[tx.Receiver] = struct{}{}
+ if !tx.CloseRemainderTo.IsZero() {
+ r.sharedAccounts[tx.CloseRemainderTo] = struct{}{}
+ }
+}
+
+func (r *resources) fillAssetConfig(hdr *transactions.Header, tx *transactions.AssetConfigTxnFields) {
+ r.sharedAccounts[hdr.Sender] = struct{}{}
+ if id := tx.ConfigAsset; id != 0 {
+ r.sharedAsas[id] = struct{}{}
+ }
+ // We don't need to read the special addresses, so they don't go in.
+}
+
+func (r *resources) fillAssetTransfer(hdr *transactions.Header, tx *transactions.AssetTransferTxnFields) {
+ id := tx.XferAsset
+ r.sharedAsas[id] = struct{}{}
+ r.shareAccountAndHolding(hdr.Sender, id)
+ r.shareAccountAndHolding(tx.AssetReceiver, id)
+
+ if !tx.AssetSender.IsZero() {
+ r.shareAccountAndHolding(tx.AssetSender, id)
+ }
+
+ if !tx.AssetCloseTo.IsZero() {
+ r.shareAccountAndHolding(tx.AssetCloseTo, id)
+ }
+}
+
+// allowsHolding checks if a holding is available under the txgroup sharing rules
+func (cx *EvalContext) allowsHolding(addr basics.Address, ai basics.AssetIndex) bool {
+ r := cx.available
+ if _, ok := r.sharedHoldings[ledgercore.AccountAsset{Address: addr, Asset: ai}]; ok {
+ return true
+ }
+ // If an ASA was created in this group, then allow holding access for any allowed account.
+ if _, ok := r.createdAsas[ai]; ok {
+ return cx.availableAccount(addr)
+ }
+ // If the address was "created" by making its app in this group, then allow for available assets.
+ for created := range r.createdApps {
+ if cx.getApplicationAddress(created) == addr {
+ return cx.availableAsset(ai)
+ }
+ }
+ // If the current txn is a creation, the new appID won't be in r.createdApps
+ // yet, but it should get the same special treatment.
+ if cx.txn.Txn.ApplicationID == 0 && cx.getApplicationAddress(cx.appID) == addr {
+ return cx.availableAsset(ai)
+ }
+ return false
+}
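+
+// For example (illustrative): if tx0 creates ASA 5000, so createdAsas[5000]
+// is set, and account A is available anywhere in the group, then the holding
+// (A, 5000) is allowed even though no transaction could have listed the
+// pair, since the asset did not exist when the group was assembled.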
+
+// allowsLocals checks if a local state is available under the txgroup sharing rules
+func (cx *EvalContext) allowsLocals(addr basics.Address, ai basics.AppIndex) bool {
+ r := cx.available
+ if _, ok := r.sharedLocals[ledgercore.AccountApp{Address: addr, App: ai}]; ok {
+ return true
+ }
+ // All locals of created apps are available
+ if _, ok := r.createdApps[ai]; ok {
+ return cx.availableAccount(addr)
+ }
+ if cx.txn.Txn.ApplicationID == 0 && cx.appID == ai {
+ return cx.availableAccount(addr)
+ }
+
+ // All locals of created app accounts are available
+ for created := range r.createdApps {
+ if cx.getApplicationAddress(created) == addr {
+ return cx.availableApp(ai)
+ }
+ }
+ if cx.txn.Txn.ApplicationID == 0 && cx.getApplicationAddress(cx.appID) == addr {
+ return cx.availableApp(ai)
+ }
+ return false
+}
+
+func (cx *EvalContext) requireHolding(acct basics.Address, id basics.AssetIndex) error {
+ /* Previous versions allowed inner appls with zeros in "required" places,
+ even if that 0 resource should have been inaccessible, because the check
+ was done at itxn_field time, and maybe the app simply didn't set the
+ field. */
+ if id == 0 || acct.IsZero() {
+ return nil
+ }
+ if !cx.allowsHolding(acct, id) {
+ return fmt.Errorf("unavailable Holding %s x %d would be accessible", acct, id)
+ }
+ return nil
+}
+
+func (cx *EvalContext) requireLocals(acct basics.Address, id basics.AppIndex) error {
+ if !cx.allowsLocals(acct, id) {
+ return fmt.Errorf("unavailable Local State %s x %d would be accessible", acct, id)
+ }
+ return nil
+}
+
+func (cx *EvalContext) allowsAssetTransfer(hdr *transactions.Header, tx *transactions.AssetTransferTxnFields) error {
+ err := cx.requireHolding(hdr.Sender, tx.XferAsset)
+ if err != nil {
+ return fmt.Errorf("axfer Sender: %w", err)
+ }
+ err = cx.requireHolding(tx.AssetReceiver, tx.XferAsset)
+ if err != nil {
+ return fmt.Errorf("axfer AssetReceiver: %w", err)
+ }
+ err = cx.requireHolding(tx.AssetSender, tx.XferAsset)
+ if err != nil {
+ return fmt.Errorf("axfer AssetSender: %w", err)
+ }
+ err = cx.requireHolding(tx.AssetCloseTo, tx.XferAsset)
+ if err != nil {
+ return fmt.Errorf("axfer AssetCloseTo: %w", err)
+ }
+ return nil
+}
+
+func (r *resources) fillAssetFreeze(hdr *transactions.Header, tx *transactions.AssetFreezeTxnFields) {
+ r.sharedAccounts[hdr.Sender] = struct{}{}
+ id := tx.FreezeAsset
+ r.sharedAsas[id] = struct{}{}
+ r.shareAccountAndHolding(tx.FreezeAccount, id)
+}
+
+func (cx *EvalContext) allowsAssetFreeze(hdr *transactions.Header, tx *transactions.AssetFreezeTxnFields) error {
+ err := cx.requireHolding(tx.FreezeAccount, tx.FreezeAsset)
+ if err != nil {
+ return fmt.Errorf("afrz FreezeAccount: %w", err)
+ }
+ return nil
+}
+
+func (r *resources) fillApplicationCall(ep *EvalParams, hdr *transactions.Header, tx *transactions.ApplicationCallTxnFields) {
+ txAccounts := make([]basics.Address, 0, 2+len(tx.Accounts)+len(tx.ForeignApps))
+ txAccounts = append(txAccounts, hdr.Sender)
+ txAccounts = append(txAccounts, tx.Accounts...)
+ for _, id := range tx.ForeignAssets {
+ r.sharedAsas[id] = struct{}{}
+ }
+ // Make the app account associated with app calls available. We
+ // don't have to add code to make the accounts of freshly created
+ // apps available, because that is already handled by looking at
+ // `createdApps`.
+ if id := tx.ApplicationID; id != 0 {
+ txAccounts = append(txAccounts, ep.getApplicationAddress(id))
+ r.sharedApps[id] = struct{}{}
+ }
+ for _, id := range tx.ForeignApps {
+ txAccounts = append(txAccounts, ep.getApplicationAddress(id))
+ r.sharedApps[id] = struct{}{}
+ }
+ for _, address := range txAccounts {
+ r.sharedAccounts[address] = struct{}{}
+
+ for _, id := range tx.ForeignAssets {
+ r.shareHolding(address, id)
+ }
+ // Similar to note about app accounts, availableLocals allows
+ // all createdApps holdings, so we don't care if id == 0 here.
+ if id := tx.ApplicationID; id != 0 {
+ r.shareLocal(address, id)
+ }
+ for _, id := range tx.ForeignApps {
+ r.shareLocal(address, id)
+ }
+ }
+
+ for _, br := range tx.Boxes {
+ var app basics.AppIndex
+ if br.Index == 0 {
+ // "current app": Ignore if this is a create, else use ApplicationID
+ if tx.ApplicationID == 0 {
+ // When the create actually happens, and we learn the appID, we'll add it.
+ continue
+ }
+ app = tx.ApplicationID
+ } else {
+ // Bounds check will already have been done by
+ // WellFormed. For testing purposes, it's better to panic
+ // now than to fail later on a returned nil.
+ app = tx.ForeignApps[br.Index-1] // shift for the 0=this convention
+ }
+ r.boxes[boxRef{app, string(br.Name)}] = false
+ }
+}
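+
+// Box ref resolution example (illustrative): with tx.ForeignApps = [77] and a
+// box ref {Index: 1, Name: "n"}, the ref names box "n" of app 77. Index 0
+// names box "n" of tx.ApplicationID, unless this appl is itself a create, in
+// which case the ref is resolved once the new app ID is known.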
+
+func (cx *EvalContext) allowsApplicationCall(hdr *transactions.Header, tx *transactions.ApplicationCallTxnFields, calleeVer uint64) error {
+ // If the callee is at least sharedResourcesVersion, then it will check
+ // availability properly itself.
+ if calleeVer >= sharedResourcesVersion {
+ return nil
+ }
+
+ // This should closely match the `fillApplicationCall` routine, as the idea
+ // is to find all of the cross product resources this attempted call will
+ // have access to, and check that they are already available.
+ txAccounts := make([]basics.Address, 0, 2+len(tx.Accounts)+len(tx.ForeignApps))
+ txAccounts = append(txAccounts, hdr.Sender)
+ txAccounts = append(txAccounts, tx.Accounts...)
+ if id := tx.ApplicationID; id != 0 {
+ txAccounts = append(txAccounts, cx.getApplicationAddress(id))
+ }
+ for _, id := range tx.ForeignApps {
+ txAccounts = append(txAccounts, cx.getApplicationAddress(id))
+ }
+ for _, address := range txAccounts {
+ for _, id := range tx.ForeignAssets {
+ err := cx.requireHolding(address, id)
+ if err != nil {
+ return fmt.Errorf("appl ForeignAssets: %w", err)
+ }
+ }
+ if id := tx.ApplicationID; id != 0 {
+ err := cx.requireLocals(address, id)
+ if err != nil {
+ return fmt.Errorf("appl ApplicationID: %w", err)
+ }
+ }
+ for _, id := range tx.ForeignApps {
+ err := cx.requireLocals(address, id)
+ if err != nil {
+ return fmt.Errorf("appl ForeignApps: %w", err)
+ }
+ }
+ }
+ return nil
+}
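+
+// Putting it together (illustrative): a v9+ app inner-calling a v8 app with
+// ForeignAssets=[X] and Accounts=[A] must already have every (account, X)
+// holding and every (account, app) local pair available, since the v8 callee
+// will assume its foreign arrays grant full cross-product access.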
diff --git a/data/transactions/logic/resources_test.go b/data/transactions/logic/resources_test.go
new file mode 100644
index 000000000..4437f1183
--- /dev/null
+++ b/data/transactions/logic/resources_test.go
@@ -0,0 +1,870 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package logic_test
+
+import (
+ "encoding/binary"
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+// TestAppSharing confirms that as of v9, apps can be accessed across groups,
+// but that before then, they could not.
+func TestAppSharing(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Create some sample transactions. The main reason this is a blackbox test
+ // (_test package) is to have access to txntest.
+ appl0 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: 900,
+ Sender: basics.Address{1, 2, 3, 4},
+ ForeignApps: []basics.AppIndex{500},
+ }
+
+ appl1 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: 901,
+ Sender: basics.Address{4, 3, 2, 1},
+ }
+
+ appl2 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: 902,
+ Sender: basics.Address{1, 2, 3, 4},
+ }
+
+ getSchema := "int 500; app_params_get AppGlobalNumByteSlice; !; assert; pop; int 1"
+ // In v8, the first tx can read app params of 500, because it's in its
+ // foreign array, but the second can't
+ logic.TestApps(t, []string{getSchema, getSchema}, txntest.Group(&appl0, &appl1), 8, nil,
+ logic.Exp(1, "unavailable App 500"))
+ // In v9, the second can, because the first can.
+ logic.TestApps(t, []string{getSchema, getSchema}, txntest.Group(&appl0, &appl1), 9, nil)
+
+ getLocalEx := `txn Sender; int 500; byte "some-key"; app_local_get_ex; pop; pop; int 1`
+
+ // In contrast, here there's no help from v9, because the second tx is
+ // reading the locals for a different account.
+
+ // app_local_get* requires the address and the app exist, else the program fails
+ logic.TestApps(t, []string{getLocalEx, getLocalEx}, txntest.Group(&appl0, &appl1), 8, nil,
+ logic.Exp(0, "no account"))
+
+ _, _, ledger := logic.MakeSampleEnv()
+ ledger.NewAccount(appl0.Sender, 100_000)
+ ledger.NewAccount(appl1.Sender, 100_000)
+ ledger.NewApp(appl0.Sender, 500, basics.AppParams{})
+ ledger.NewLocals(appl0.Sender, 500) // opt in
+ // Now txn0 passes, but txn1 has an error because it can't see app 500
+ logic.TestApps(t, []string{getLocalEx, getLocalEx}, txntest.Group(&appl0, &appl1), 9, ledger,
+ logic.Exp(1, "unavailable Local State"))
+
+ // But it's ok in appl2, because appl2 uses the same Sender; the foreign
+ // app need not be repeated in appl2 because the local state being
+ // accessed is the one made available by tx0.
+ logic.TestApps(t, []string{getLocalEx, getLocalEx}, txntest.Group(&appl0, &appl2), 9, ledger)
+ logic.TestApps(t, []string{getLocalEx, getLocalEx}, txntest.Group(&appl0, &appl2), 8, ledger, // version 8 does not get sharing
+ logic.Exp(1, "unavailable App 500"))
+
+ // Checking if an account is opted in has pretty much the same rules
+ optInCheck500 := "txn Sender; int 500; app_opted_in"
+
+ // app_opted_in requires the address and the app exist, else the program fails
+ logic.TestApps(t, []string{optInCheck500, optInCheck500}, txntest.Group(&appl0, &appl1), 9, nil, // nil ledger, no account
+ logic.Exp(0, "no account: "+appl0.Sender.String()))
+
+ // Now txn0 passes, but txn1 has an error because it can't see app 500 locals for appl1.Sender
+ logic.TestApps(t, []string{optInCheck500, optInCheck500}, txntest.Group(&appl0, &appl1), 9, ledger,
+ logic.Exp(1, "unavailable Local State "+appl1.Sender.String()))
+
+ // But it's ok in appl2, because appl2 uses the same Sender; the foreign
+ // app need not be repeated in appl2 because the local state being
+ // accessed is the one made available by tx0.
+ logic.TestApps(t, []string{optInCheck500, optInCheck500}, txntest.Group(&appl0, &appl2), 9, ledger)
+ logic.TestApps(t, []string{optInCheck500, optInCheck500}, txntest.Group(&appl0, &appl2), 8, ledger, // version 8 does not get sharing
+ logic.Exp(1, "unavailable App 500"))
+
+ // Confirm sharing applies to the app id called in tx0, not just foreign app array
+ optInCheck900 := "txn Sender; int 900; app_opted_in; !" // we did not opt any senders into 900
+
+ // as above, appl1 can't see the local state, but appl2 can b/c sender is same as appl0
+ logic.TestApps(t, []string{optInCheck900, optInCheck900}, txntest.Group(&appl0, &appl1), 9, ledger,
+ logic.Exp(1, "unavailable Local State "+appl1.Sender.String()))
+ logic.TestApps(t, []string{optInCheck900, optInCheck900}, txntest.Group(&appl0, &appl2), 9, ledger)
+ logic.TestApps(t, []string{optInCheck900, optInCheck900}, txntest.Group(&appl0, &appl2), 8, ledger, // v8=no sharing
+ logic.Exp(1, "unavailable App 900"))
+
+ // Now, confirm that *setting* a local state in tx1 that was made available
+ // in tx0 works. The extra check here is that the change is recorded
+ // properly in EvalDelta.
+ putLocal := `txn ApplicationArgs 0; byte "X"; int 74; app_local_put; int 1`
+
+ noop := `int 1`
+ sources := []string{noop, putLocal}
+ appl1.ApplicationArgs = [][]byte{appl0.Sender[:]} // tx1 will try to modify local state exposed in tx0
+ logic.TestApps(t, sources, txntest.Group(&appl0, &appl1), 9, ledger,
+ logic.Exp(1, "account "+appl0.Sender.String()+" is not opted into 901"))
+ ledger.NewLocals(appl0.Sender, 901) // opt in
+ ep := logic.TestApps(t, sources, txntest.Group(&appl0, &appl1), 9, ledger)
+ require.Len(t, ep.TxnGroup, 2)
+ ed := ep.TxnGroup[1].ApplyData.EvalDelta
+ require.Equal(t, map[uint64]basics.StateDelta{
+ 1: { // no tx.Accounts, 1 indicates first in SharedAccts
+ "X": {
+ Action: basics.SetUintAction,
+ Uint: 74,
+ },
+ },
+ }, ed.LocalDeltas)
+ require.Len(t, ed.SharedAccts, 1)
+ require.Equal(t, ep.TxnGroup[0].Txn.Sender, ed.SharedAccts[0])
+
+ // when running all three, appl2 can't read the locals of app in tx0 and addr in tx1
+ sources = []string{"", "", "gtxn 1 Sender; gtxn 0 Applications 0; byte 0xAA; app_local_get_ex"}
+ logic.TestApps(t, sources, txntest.Group(&appl0, &appl1, &appl2), 9, nil,
+ logic.Exp(2, "unavailable Local State")) // note that the error message is for Locals, not specialized
+ // same test of account in array of tx1 rather than Sender
+ appl1.Accounts = []basics.Address{{7, 7}}
+ sources = []string{"", "", "gtxn 1 Accounts 1; gtxn 0 Applications 0; byte 0xAA; app_local_get_ex"}
+ logic.TestApps(t, sources, txntest.Group(&appl0, &appl1, &appl2), 9, nil,
+ logic.Exp(2, "unavailable Local State")) // note that the error message is for Locals, not specialized
+}
+
+// TestBetterLocalErrors confirms that we get specific errors about the missing
+// address or app when accessing a Local State with only one available.
+func TestBetterLocalErrors(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ joe := basics.Address{9, 9, 9}
+
+ ep, tx, ledger := logic.MakeSampleEnv()
+ ledger.NewAccount(joe, 5000000)
+ ledger.NewApp(joe, 500, basics.AppParams{})
+ ledger.NewLocals(joe, 500)
+
+ getLocalEx := `
+txn ApplicationArgs 0
+txn ApplicationArgs 1; btoi
+byte "some-key"
+app_local_get_ex
+pop; pop; int 1
+`
+ app := make([]byte, 8)
+ binary.BigEndian.PutUint64(app, 500)
+
+ tx.ApplicationArgs = [][]byte{joe[:], app}
+ logic.TestApp(t, getLocalEx, ep, "unavailable Account "+joe.String()+", unavailable App 500")
+ tx.Accounts = []basics.Address{joe}
+ logic.TestApp(t, getLocalEx, ep, "unavailable App 500")
+ tx.ForeignApps = []basics.AppIndex{500}
+ logic.TestApp(t, getLocalEx, ep)
+ binary.BigEndian.PutUint64(tx.ApplicationArgs[1], 500)
+ logic.TestApp(t, getLocalEx, ep)
+ binary.BigEndian.PutUint64(tx.ApplicationArgs[1], 501)
+ logic.TestApp(t, getLocalEx, ep, "unavailable App 501")
+
+ binary.BigEndian.PutUint64(tx.ApplicationArgs[1], 500)
+ tx.Accounts = []basics.Address{}
+ logic.TestApp(t, getLocalEx, ep, "unavailable Account "+joe.String())
+}
+
+// TestAssetSharing confirms that as of v9, assets can be accessed across
+// groups, but that before then, they could not.
+func TestAssetSharing(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Create some sample transactions. The main reason this is a blackbox test
+ // (_test package) is to have access to txntest.
+ appl0 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: basics.Address{1, 2, 3, 4},
+ ForeignAssets: []basics.AssetIndex{400},
+ }
+
+ appl1 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: basics.Address{4, 3, 2, 1},
+ }
+
+ appl2 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: basics.Address{1, 2, 3, 4},
+ }
+
+ getTotal := "int 400; asset_params_get AssetTotal; pop; pop; int 1"
+
+ // In v8, the first tx can read asset 400, because it's in its foreign array,
+ // but the second can't
+ logic.TestApps(t, []string{getTotal, getTotal}, txntest.Group(&appl0, &appl1), 8, nil,
+ logic.Exp(1, "unavailable Asset 400"))
+ // In v9, the second can, because the first can.
+ logic.TestApps(t, []string{getTotal, getTotal}, txntest.Group(&appl0, &appl1), 9, nil)
+
+ getBalance := "txn Sender; int 400; asset_holding_get AssetBalance; pop; pop; int 1"
+
+ // In contrast, here there's no help from v9, because the second tx is
+ // reading a holding for a different account.
+ logic.TestApps(t, []string{getBalance, getBalance}, txntest.Group(&appl0, &appl1), 8, nil,
+ logic.Exp(1, "unavailable Asset 400"))
+ logic.TestApps(t, []string{getBalance, getBalance}, txntest.Group(&appl0, &appl1), 9, nil,
+ logic.Exp(1, "unavailable Holding"))
+ // But it's ok in appl2, because the same account is used, even though the
+ // foreign-asset is not repeated in appl2.
+ logic.TestApps(t, []string{getBalance, getBalance}, txntest.Group(&appl0, &appl2), 9, nil)
+
+ // when running all three, appl2 can't read the holding of asset in tx0 and addr in tx1
+ sources := []string{"", "", "gtxn 1 Sender; gtxn 0 Assets 0; asset_holding_get AssetBalance"}
+ logic.TestApps(t, sources, txntest.Group(&appl0, &appl1, &appl2), 9, nil,
+ logic.Exp(2, "unavailable Holding")) // note that the error message is for Holding, not specialized
+ // same test of account in array of tx1 rather than Sender
+ appl1.Accounts = []basics.Address{{7, 7}}
+ sources = []string{"", "", "gtxn 1 Accounts 1; gtxn 0 Assets 0; asset_holding_get AssetBalance"}
+ logic.TestApps(t, sources, txntest.Group(&appl0, &appl1, &appl2), 9, nil,
+ logic.Exp(2, "unavailable Holding")) // note that the error message is for Holding, not specialized
+}
+
+// TestBetterHoldingErrors confirms that we get specific errors about the missing
+// address or ASA when accessing a holding with only one available.
+func TestBetterHoldingErrors(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ joe := basics.Address{9, 9, 9}
+
+ ep, tx, ledger := logic.MakeSampleEnv()
+ ledger.NewAccount(joe, 5000000)
+ ledger.NewAsset(joe, 200, basics.AssetParams{})
+ // as creator, joe will also be opted in
+
+ getHoldingBalance := `
+txn ApplicationArgs 0
+txn ApplicationArgs 1; btoi
+asset_holding_get AssetBalance
+pop; pop; int 1
+`
+ asa := make([]byte, 8)
+ binary.BigEndian.PutUint64(asa, 200)
+
+ tx.ApplicationArgs = [][]byte{joe[:], asa}
+ logic.TestApp(t, getHoldingBalance, ep, "unavailable Account "+joe.String()+", unavailable Asset 200")
+ tx.Accounts = []basics.Address{joe}
+ logic.TestApp(t, getHoldingBalance, ep, "unavailable Asset 200")
+ tx.ForeignAssets = []basics.AssetIndex{200}
+ logic.TestApp(t, getHoldingBalance, ep)
+ binary.BigEndian.PutUint64(tx.ApplicationArgs[1], 0) // slot=0 is same (200)
+ logic.TestApp(t, getHoldingBalance, ep)
+
+ binary.BigEndian.PutUint64(tx.ApplicationArgs[1], 200)
+ tx.Accounts = []basics.Address{}
+ logic.TestApp(t, getHoldingBalance, ep, "unavailable Account "+joe.String())
+}
+
+// TestAccountPassing checks that the current app account and foreign app's
+// accounts can be passed in txn.Accounts for a called app.
+func TestAccountPassing(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // appAddressVersion=7
+ logic.TestLogicRange(t, 7, 0, func(t *testing.T, ep *logic.EvalParams, tx *transactions.Transaction, ledger *logic.Ledger) {
+ t.Parallel()
+ accept := logic.TestProg(t, "int 1", 6)
+ alice := basics.Address{1, 1, 1, 1, 1}
+ ledger.NewApp(alice, 4, basics.AppParams{
+ ApprovalProgram: accept.Program,
+ })
+ callWithAccount := `
+itxn_begin
+ int appl; itxn_field TypeEnum
+ int 4; itxn_field ApplicationID
+ %s; itxn_field Accounts
+itxn_submit
+int 1`
+ tx.ForeignApps = []basics.AppIndex{4}
+ ledger.NewAccount(appAddr(888), 50_000)
+ // First show that we're not just letting anything get passed in
+ logic.TestApp(t, fmt.Sprintf(callWithAccount, "int 32; bzero; byte 0x07; b|"), ep,
+ "invalid Account reference AAAAA")
+ // Now show we can pass our own address
+ logic.TestApp(t, fmt.Sprintf(callWithAccount, "global CurrentApplicationAddress"), ep)
+ // Or the address of one of our ForeignApps
+ logic.TestApp(t, fmt.Sprintf(callWithAccount, "addr "+basics.AppIndex(4).Address().String()), ep)
+ })
+}
+
+// TestOtherTxSharing tests resource sharing across other kinds of transactions besides appl.
+func TestOtherTxSharing(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ _, _, ledger := logic.MakeSampleEnv()
+
+ senderAcct := basics.Address{1, 2, 3, 4, 5, 6, 1}
+ ledger.NewAccount(senderAcct, 2001)
+ senderBalance := "txn ApplicationArgs 0; balance; int 2001; =="
+
+ receiverAcct := basics.Address{1, 2, 3, 4, 5, 6, 2}
+ ledger.NewAccount(receiverAcct, 2002)
+ receiverBalance := "txn ApplicationArgs 0; balance; int 2002; =="
+
+ otherAcct := basics.Address{1, 2, 3, 4, 5, 6, 3}
+ ledger.NewAccount(otherAcct, 2003)
+ otherBalance := "txn ApplicationArgs 0; balance; int 2003; =="
+
+ other2Acct := basics.Address{1, 2, 3, 4, 5, 6, 4}
+ ledger.NewAccount(other2Acct, 2004)
+ other2Balance := "txn ApplicationArgs 0; balance; int 2004; =="
+
+ appl := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: basics.Address{5, 5, 5, 5}, // different from all other accounts used
+ ApplicationArgs: [][]byte{senderAcct[:]},
+ }
+
+ keyreg := txntest.Txn{
+ Type: protocol.KeyRegistrationTx,
+ Sender: senderAcct,
+ }
+ pay := txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: senderAcct,
+ Receiver: receiverAcct,
+ }
+ acfg := txntest.Txn{
+ Type: protocol.AssetConfigTx,
+ Sender: senderAcct,
+ AssetParams: basics.AssetParams{
+ Manager: otherAcct, // other is here to show they _don't_ become available
+ Reserve: otherAcct,
+ Freeze: otherAcct,
+ Clawback: otherAcct,
+ },
+ }
+ axfer := txntest.Txn{
+ Type: protocol.AssetTransferTx,
+ XferAsset: 100, // must be < 256, later code assumes it fits in a byte
+ Sender: senderAcct,
+ AssetReceiver: receiverAcct,
+ AssetSender: otherAcct,
+ }
+ afrz := txntest.Txn{
+ Type: protocol.AssetFreezeTx,
+ FreezeAsset: 200, // must be < 256, later code assumes it fits in a byte
+ Sender: senderAcct,
+ FreezeAccount: otherAcct,
+ }
+
+ for _, send := range []txntest.Txn{keyreg, pay, acfg, axfer, afrz} {
+ logic.TestApps(t, []string{"", senderBalance}, txntest.Group(&send, &appl), 9, ledger)
+ logic.TestApps(t, []string{senderBalance, ""}, txntest.Group(&appl, &send), 9, ledger)
+
+ logic.TestApps(t, []string{"", senderBalance}, txntest.Group(&send, &appl), 8, ledger,
+ logic.Exp(1, "invalid Account reference"))
+ logic.TestApps(t, []string{senderBalance, ""}, txntest.Group(&appl, &send), 8, ledger,
+ logic.Exp(0, "invalid Account reference"))
+ }
+
+ holdingAccess := `
+ txn ApplicationArgs 0
+ txn ApplicationArgs 1; btoi
+ asset_holding_get AssetBalance
+ pop; pop; int 1
+`
+
+ t.Run("keyreg", func(t *testing.T) {
+ appl.ApplicationArgs = [][]byte{senderAcct[:], {200}}
+ logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&keyreg, &appl), 9, ledger,
+ logic.Exp(1, "unavailable Asset 200"))
+ withRef := appl
+ withRef.ForeignAssets = []basics.AssetIndex{200}
+ logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&keyreg, &withRef), 9, ledger,
+ logic.Exp(1, "unavailable Holding "+senderAcct.String()))
+ })
+ t.Run("pay", func(t *testing.T) {
+ // The receiver is available for algo balance reading
+ appl.ApplicationArgs = [][]byte{receiverAcct[:]}
+ logic.TestApps(t, []string{"", receiverBalance}, txntest.Group(&pay, &appl), 9, ledger)
+
+ // The other account is not (it's not even in the pay txn)
+ appl.ApplicationArgs = [][]byte{otherAcct[:]}
+ logic.TestApps(t, []string{"", otherBalance}, txntest.Group(&pay, &appl), 9, ledger,
+ logic.Exp(1, "invalid Account reference "+otherAcct.String()))
+
+ // The other account becomes accessible because used in CloseRemainderTo
+ withClose := pay
+ withClose.CloseRemainderTo = otherAcct
+ logic.TestApps(t, []string{"", otherBalance}, txntest.Group(&withClose, &appl), 9, ledger)
+ })
+
+ t.Run("acfg", func(t *testing.T) {
+ // The other account is not available even though it's set as all the special asset addresses
+ appl.ApplicationArgs = [][]byte{otherAcct[:]}
+ logic.TestApps(t, []string{"", otherBalance}, txntest.Group(&acfg, &appl), 9, ledger,
+ logic.Exp(1, "invalid Account reference "+otherAcct.String()))
+ })
+
+ t.Run("axfer", func(t *testing.T) {
+ // The receiver is also available for algo balance reading
+ appl.ApplicationArgs = [][]byte{receiverAcct[:]}
+ logic.TestApps(t, []string{"", receiverBalance}, txntest.Group(&axfer, &appl), 9, ledger)
+
+ // as is the "other" (AssetSender)
+ appl.ApplicationArgs = [][]byte{otherAcct[:]}
+ logic.TestApps(t, []string{"", otherBalance}, txntest.Group(&axfer, &appl), 9, ledger)
+
+ // sender holding is available
+ appl.ApplicationArgs = [][]byte{senderAcct[:], {byte(axfer.XferAsset)}}
+ logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&axfer, &appl), 9, ledger)
+
+ // receiver holding is available
+ appl.ApplicationArgs = [][]byte{receiverAcct[:], {byte(axfer.XferAsset)}}
+ logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&axfer, &appl), 9, ledger)
+
+ // asset sender (other) account is available
+ appl.ApplicationArgs = [][]byte{otherAcct[:], {byte(axfer.XferAsset)}}
+ logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&axfer, &appl), 9, ledger)
+
+ // AssetCloseTo holding becomes available when set
+ appl.ApplicationArgs = [][]byte{other2Acct[:], {byte(axfer.XferAsset)}}
+ logic.TestApps(t, []string{"", other2Balance}, txntest.Group(&axfer, &appl), 9, ledger,
+ logic.Exp(1, "invalid Account reference "+other2Acct.String()))
+ logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&axfer, &appl), 9, ledger,
+ logic.Exp(1, "unavailable Account "+other2Acct.String()))
+
+ withClose := axfer
+ withClose.AssetCloseTo = other2Acct
+ appl.ApplicationArgs = [][]byte{other2Acct[:], {byte(axfer.XferAsset)}}
+ logic.TestApps(t, []string{"", other2Balance}, txntest.Group(&withClose, &appl), 9, ledger)
+ logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&withClose, &appl), 9, ledger)
+ })
+
+ t.Run("afrz", func(t *testing.T) {
+ // The other account is available (for algo and asset)
+ appl.ApplicationArgs = [][]byte{otherAcct[:], {byte(afrz.FreezeAsset)}}
+ logic.TestApps(t, []string{"", otherBalance}, txntest.Group(&afrz, &appl), 9, ledger)
+ logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&afrz, &appl), 9, ledger)
+
+ // The sender's holding is _not_ (a sender's own holding is irrelevant to afrz)
+ appl.ApplicationArgs = [][]byte{senderAcct[:], {byte(afrz.FreezeAsset)}}
+ logic.TestApps(t, []string{"", senderBalance}, txntest.Group(&afrz, &appl), 9, ledger)
+ logic.TestApps(t, []string{"", holdingAccess}, txntest.Group(&afrz, &appl), 9, ledger,
+ logic.Exp(1, "unavailable Holding "+senderAcct.String()))
+ })
+}
+
+// TestSharedInnerTxns checks how inner txns access resources.
+func TestSharedInnerTxns(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ _, _, ledger := logic.MakeSampleEnv()
+
+ const asa1 = 201
+ const asa2 = 202
+
+ senderAcct := basics.Address{1, 2, 3, 4, 5, 6, 1}
+ ledger.NewAccount(senderAcct, 2001)
+ ledger.NewHolding(senderAcct, asa1, 1, false)
+
+ receiverAcct := basics.Address{1, 2, 3, 4, 5, 6, 2}
+ ledger.NewAccount(receiverAcct, 2002)
+ ledger.NewHolding(receiverAcct, asa1, 1, false)
+
+ otherAcct := basics.Address{1, 2, 3, 4, 5, 6, 3}
+ ledger.NewAccount(otherAcct, 2003)
+ ledger.NewHolding(otherAcct, asa1, 1, false)
+
+ unusedAcct := basics.Address{1, 2, 3, 4, 5, 6, 4}
+
+ payToArg := `
+itxn_begin
+ int pay; itxn_field TypeEnum
+ int 100; itxn_field Amount
+ txn ApplicationArgs 0; itxn_field Receiver
+itxn_submit
+int 1
+`
+ axferToArgs := `
+itxn_begin
+ int axfer; itxn_field TypeEnum
+ int 2; itxn_field AssetAmount
+ txn ApplicationArgs 0; itxn_field AssetReceiver
+ txn ApplicationArgs 1; btoi; itxn_field XferAsset
+itxn_submit
+int 1
+`
+
+ acfgArg := `
+itxn_begin
+ int acfg; itxn_field TypeEnum
+ txn ApplicationArgs 0; btoi; itxn_field ConfigAsset
+itxn_submit
+int 1
+`
+
+ appl := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ ApplicationID: 1234,
+ Sender: basics.Address{5, 5, 5, 5}, // different from all other accounts used
+ }
+ appAcct := appl.ApplicationID.Address()
+ // App will do a lot of txns. Start well funded.
+ ledger.NewAccount(appAcct, 1_000_000)
+ // And needs some ASAs for inner axfer testing
+ ledger.NewHolding(appAcct, asa1, 1_000_000, false)
+
+ t.Run("keyreg", func(t *testing.T) {
+ keyreg := txntest.Txn{
+ Type: protocol.KeyRegistrationTx,
+ Sender: senderAcct,
+ }
+
+ // appl has no foreign ref to senderAcct, but can still inner pay it
+ appl.ApplicationArgs = [][]byte{senderAcct[:]}
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&keyreg, &appl), 9, ledger)
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&keyreg, &appl), 8, ledger,
+ logic.Exp(1, "invalid Account reference "+senderAcct.String()))
+
+ // confirm you can't just pay _anybody_. receiverAcct is not in use at all.
+ appl.ApplicationArgs = [][]byte{receiverAcct[:]}
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&keyreg, &appl), 9, ledger,
+ logic.Exp(1, "invalid Account reference "+receiverAcct.String()))
+ })
+
+ t.Run("pay", func(t *testing.T) {
+ pay := txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: senderAcct,
+ Receiver: receiverAcct,
+ }
+
+ // appl has no foreign ref to senderAcct or receiverAcct, but can still inner pay them
+ appl.ApplicationArgs = [][]byte{senderAcct[:]}
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 9, ledger)
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 8, ledger,
+ logic.Exp(1, "invalid Account reference "+senderAcct.String()))
+
+ appl.ApplicationArgs = [][]byte{receiverAcct[:]}
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 9, ledger)
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 8, ledger,
+ logic.Exp(1, "invalid Account reference "+receiverAcct.String()))
+
+ // confirm you can't just pay _anybody_. otherAcct is not in use at all.
+ appl.ApplicationArgs = [][]byte{otherAcct[:]}
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 9, ledger,
+ logic.Exp(1, "invalid Account reference "+otherAcct.String()))
+ })
+
+ t.Run("axfer", func(t *testing.T) {
+ axfer := txntest.Txn{
+ Type: protocol.AssetTransferTx,
+ XferAsset: asa1,
+ Sender: senderAcct,
+ AssetReceiver: receiverAcct,
+ AssetSender: otherAcct,
+ }
+
+ // appl can pay the axfer sender
+ appl.ApplicationArgs = [][]byte{senderAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&axfer, &appl), 9, ledger)
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&axfer, &appl), 8, ledger,
+ logic.Exp(1, "invalid Account reference "+senderAcct.String()))
+ // but can't axfer to sender, because appAcct doesn't have holding access for the asa
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&axfer, &appl), 9, ledger,
+ logic.Exp(1, "unavailable Holding"))
+ // and to the receiver
+ appl.ApplicationArgs = [][]byte{receiverAcct[:], {asa1}}
+ logic.TestApps(t, []string{payToArg}, txntest.Group(&appl, &axfer), 9, ledger)
+ logic.TestApps(t, []string{axferToArgs}, txntest.Group(&appl, &axfer), 9, ledger,
+ logic.Exp(0, "unavailable Holding"))
+ // and to the clawback
+ appl.ApplicationArgs = [][]byte{otherAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&axfer, &appl), 9, ledger)
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&axfer, &appl), 9, ledger,
+ logic.Exp(1, "unavailable Holding"))
+
+ // Those axfers become possible by adding the asa to the appl's ForeignAssets
+ appl.ForeignAssets = []basics.AssetIndex{asa1}
+ appl.ApplicationArgs = [][]byte{senderAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&axfer, &appl), 9, ledger)
+ appl.ApplicationArgs = [][]byte{receiverAcct[:], {asa1}}
+ logic.TestApps(t, []string{axferToArgs}, txntest.Group(&appl, &axfer), 9, ledger)
+ appl.ApplicationArgs = [][]byte{otherAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&axfer, &appl), 9, ledger)
+
+ // but can't axfer a different asset
+ appl.ApplicationArgs = [][]byte{senderAcct[:], {asa2}}
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&axfer, &appl), 9, ledger,
+ logic.Exp(1, fmt.Sprintf("unavailable Asset %d", asa2)))
+ // or correct asset to an unknown address
+ appl.ApplicationArgs = [][]byte{unusedAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&axfer, &appl), 9, ledger,
+ logic.Exp(1, "invalid Account reference"))
+
+ // appl can acfg the asset from tx0 (which requires asset available, not holding)
+ appl.ApplicationArgs = [][]byte{{asa1}}
+ logic.TestApps(t, []string{"", acfgArg}, txntest.Group(&axfer, &appl), 9, ledger)
+ appl.ApplicationArgs = [][]byte{{asa2}} // but not asa2
+ logic.TestApps(t, []string{"", acfgArg}, txntest.Group(&axfer, &appl), 9, ledger,
+ logic.Exp(1, fmt.Sprintf("unavailable Asset %d", asa2)))
+
+ // Now, confirm that access to an account from a pay in one tx, and to an
+ // asa from another, doesn't allow an inner axfer in a third tx (because
+ // there's no access to that payer's holding.)
+ payAcct := basics.Address{3, 2, 3, 2, 3, 2}
+ pay := txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: payAcct,
+ Receiver: payAcct,
+ }
+ // the asset is acfg-able
+ appl.ApplicationArgs = [][]byte{{asa1}}
+ logic.TestApps(t, []string{"", "", acfgArg}, txntest.Group(&pay, &axfer, &appl), 9, ledger)
+ logic.TestApps(t, []string{"", "", acfgArg}, txntest.Group(&axfer, &pay, &appl), 9, ledger)
+ // payAcct (the pay sender) is payable
+ appl.ApplicationArgs = [][]byte{payAcct[:]}
+ logic.TestApps(t, []string{"", "", payToArg}, txntest.Group(&axfer, &pay, &appl), 9, ledger)
+ // but the cross-product is not available, so no axfer (opting in first, to prevent that error)
+ ledger.NewHolding(payAcct, asa1, 1, false)
+ appl.ApplicationArgs = [][]byte{payAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", "", axferToArgs}, txntest.Group(&axfer, &pay, &appl), 9, ledger,
+ logic.Exp(2, "unavailable Holding "+payAcct.String()))
+ })
+
+ t.Run("afrz", func(t *testing.T) {
+ appl.ForeignAssets = []basics.AssetIndex{} // reset after previous tests
+ afrz := txntest.Txn{
+ Type: protocol.AssetFreezeTx,
+ FreezeAsset: asa1,
+ Sender: senderAcct,
+ FreezeAccount: otherAcct,
+ }
+
+ // appl can pay to the sender & freeze account
+ appl.ApplicationArgs = [][]byte{senderAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&afrz, &appl), 9, ledger)
+ appl.ApplicationArgs = [][]byte{otherAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&afrz, &appl), 9, ledger)
+
+ // can't axfer to the afrz sender because appAcct holding is not available from afrz
+ appl.ApplicationArgs = [][]byte{senderAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&afrz, &appl), 9, ledger,
+ logic.Exp(1, "unavailable Holding "+appAcct.String()))
+ appl.ForeignAssets = []basics.AssetIndex{asa1}
+ // _still_ can't axfer to sender because afrz sender's holding does NOT
+ // become available (but note that the complaint is now about that account)
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&afrz, &appl), 9, ledger,
+ logic.Exp(1, "unavailable Holding "+senderAcct.String()))
+
+ // and not to the receiver which isn't in afrz
+ appl.ApplicationArgs = [][]byte{receiverAcct[:], {asa1}}
+ logic.TestApps(t, []string{payToArg}, txntest.Group(&appl, &afrz), 9, ledger,
+ logic.Exp(0, "invalid Account reference "+receiverAcct.String()))
+ logic.TestApps(t, []string{axferToArgs}, txntest.Group(&appl, &afrz), 9, ledger,
+ logic.Exp(0, "invalid Account reference "+receiverAcct.String()))
+
+ // otherAcct is the afrz target; its holding and account are available
+ appl.ApplicationArgs = [][]byte{otherAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&afrz, &appl), 9, ledger)
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&afrz, &appl), 9, ledger)
+
+ // but still can't axfer a different asset
+ appl.ApplicationArgs = [][]byte{otherAcct[:], {asa2}}
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&afrz, &appl), 9, ledger,
+ logic.Exp(1, fmt.Sprintf("unavailable Asset %d", asa2)))
+ appl.ForeignAssets = []basics.AssetIndex{asa2}
+ // once added to appl's foreign array, the appl still lacks access to other's holding
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&afrz, &appl), 9, ledger,
+ logic.Exp(1, "unavailable Holding "+otherAcct.String()))
+
+ // appl can acfg the asset from tx0 (which requires asset available, not holding)
+ appl.ForeignAssets = []basics.AssetIndex{}
+ appl.ApplicationArgs = [][]byte{{asa1}}
+ logic.TestApps(t, []string{"", acfgArg}, txntest.Group(&afrz, &appl), 9, ledger)
+ appl.ApplicationArgs = [][]byte{{asa2}} // but not asa2
+ logic.TestApps(t, []string{"", acfgArg}, txntest.Group(&afrz, &appl), 9, ledger,
+ logic.Exp(1, fmt.Sprintf("unavailable Asset %d", asa2)))
+
+ })
+
+ t.Run("appl", func(t *testing.T) {
+ appl.ForeignAssets = []basics.AssetIndex{} // reset after previous test
+ appl.Accounts = []basics.Address{} // reset after previous tests
+ appl0 := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: senderAcct,
+ Accounts: []basics.Address{otherAcct},
+ ForeignAssets: []basics.AssetIndex{asa1},
+ }
+
+ // appl can pay to the otherAcct because it was in tx0
+ appl.ApplicationArgs = [][]byte{otherAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&appl0, &appl), 9, ledger)
+ logic.TestApps(t, []string{"", payToArg}, txntest.Group(&appl0, &appl), 8, ledger, // version 8 does not get sharing
+ logic.Exp(1, "invalid Account reference "+otherAcct.String()))
+ // appl can (almost) axfer asa1 to the otherAcct because both are in tx0
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&appl0, &appl), 9, ledger,
+ logic.Exp(1, "axfer Sender: unavailable Holding"))
+	// but it can't access its OWN asa1, unless added to ForeignAssets
+ appl.ForeignAssets = []basics.AssetIndex{asa1}
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&appl0, &appl), 9, ledger)
+
+	// but it can't use 202 at all. Notice the error is more direct than
+	// above: the problem is not only the axfer Sender, it's that 202
+	// can't be used at all.
+ appl.ApplicationArgs = [][]byte{otherAcct[:], {asa2}}
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&appl0, &appl), 9, ledger,
+ logic.Exp(1, "unavailable Asset 202"))
+ // And adding asa2 does not fix this problem, because the other x 202 holding is unavailable
+ appl.ForeignAssets = []basics.AssetIndex{asa2}
+ logic.TestApps(t, []string{"", axferToArgs}, txntest.Group(&appl0, &appl), 9, ledger,
+ logic.Exp(1, "axfer AssetReceiver: unavailable Holding "+otherAcct.String()+" x 202"))
+
+ // Now, conduct similar tests, but with the apps performing the
+ // pays/axfers invoked from an outer app. Use various versions to check
+ // cross version sharing.
+
+ // add v8 and v9 versions of the pay app to the ledger for inner calling
+ payToArgV8 := logic.TestProg(t, payToArg, 8)
+ ledger.NewApp(senderAcct, 88, basics.AppParams{ApprovalProgram: payToArgV8.Program})
+ ledger.NewAccount(appAddr(88), 1_000_000)
+ payToArgV9 := logic.TestProg(t, payToArg, 9)
+ ledger.NewApp(senderAcct, 99, basics.AppParams{ApprovalProgram: payToArgV9.Program})
+ ledger.NewAccount(appAddr(99), 1_000_000)
+
+ approvalV8 := logic.TestProg(t, "int 1", 8)
+ ledger.NewApp(senderAcct, 11, basics.AppParams{ApprovalProgram: approvalV8.Program})
+
+ innerCallTemplate := `
+itxn_begin
+int appl; itxn_field TypeEnum;
+txn ApplicationArgs 0; btoi; itxn_field ApplicationID
+txn ApplicationArgs 1; itxn_field ApplicationArgs
+txn ApplicationArgs 2; itxn_field ApplicationArgs
+%s
+itxn_submit
+int 1
+`
+ innerCall := fmt.Sprintf(innerCallTemplate, "")
+
+ appl.ForeignApps = []basics.AppIndex{11, 88, 99}
+
+ appl.ApplicationArgs = [][]byte{{99}, otherAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", innerCall}, txntest.Group(&appl0, &appl), 9, ledger)
+ // when the inner program is v8, it can't perform the pay
+ appl.ApplicationArgs = [][]byte{{88}, otherAcct[:], {asa1}}
+ logic.TestApps(t, []string{"", innerCall}, txntest.Group(&appl0, &appl), 9, ledger,
+ logic.Exp(1, "invalid Account reference "+otherAcct.String()))
+	// unless the caller passes in the account. But it can't, because doing
+	// so would also give the called app access to the passed account's
+	// local state (which isn't available to the caller)
+ innerCallWithAccount := fmt.Sprintf(innerCallTemplate, "addr "+otherAcct.String()+"; itxn_field Accounts")
+ logic.TestApps(t, []string{"", innerCallWithAccount}, txntest.Group(&appl0, &appl), 9, ledger,
+ logic.Exp(1, "appl ApplicationID: unavailable Local State "+otherAcct.String()))
+	// the caller can't fix this by passing 88 as a foreign app, because that
+	// is not much different from the current situation: 88 is being called,
+	// so it's already available.
+ innerCallWithBoth := fmt.Sprintf(innerCallTemplate,
+ "addr "+otherAcct.String()+"; itxn_field Accounts; int 88; itxn_field Applications")
+ logic.TestApps(t, []string{"", innerCallWithBoth}, txntest.Group(&appl0, &appl), 9, ledger,
+ logic.Exp(1, "appl ApplicationID: unavailable Local State "+otherAcct.String()))
+
+	// the caller *can* do it if it originally had access to otherAcct's app 88 locals.
+ appl0.ForeignApps = []basics.AppIndex{88}
+ logic.TestApps(t, []string{"", innerCallWithAccount}, txntest.Group(&appl0, &appl), 9, ledger)
+
+ // here we confirm that even if we try calling another app, we still
+ // can't pass in `other` and 88, because that would give the app access
+ // to that local state. (this is confirming we check the cross product
+ // of the foreign arrays, not just the accounts against called app id)
+ appl.ApplicationArgs = [][]byte{{11}, otherAcct[:], {asa1}}
+ appl0.ForeignApps = []basics.AppIndex{11}
+ logic.TestApps(t, []string{"", innerCallWithBoth}, txntest.Group(&appl0, &appl), 9, ledger,
+ logic.Exp(1, "appl ForeignApps: unavailable Local State "+otherAcct.String()))
+
+ })
+
+}
+
+// TestAccessMyLocals confirms that apps can access their OWN locals if they opt
+// in at creation time.
+func TestAccessMyLocals(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // start at 3, needs assert
+ logic.TestLogicRange(t, 3, 0, func(t *testing.T, ep *logic.EvalParams, tx *transactions.Transaction, ledger *logic.Ledger) {
+ sender := basics.Address{1, 2, 3, 4}
+ ledger.NewAccount(sender, 1_000_000)
+ // we don't really process transactions in these tests, so despite the
+ // OptInOC below, we must manually opt the sender into the app that
+ // will get created for this test.
+ ledger.NewLocals(sender, 888)
+
+ *tx = txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender,
+ ApplicationID: 0,
+ OnCompletion: transactions.OptInOC,
+ LocalStateSchema: basics.StateSchema{
+ NumUint: 1,
+ },
+ }.Txn()
+ source := `
+ int 0
+ byte "X"
+ app_local_get
+ !
+ assert
+ int 0
+ byte "X"
+ int 7
+ app_local_put
+ int 0
+ byte "X"
+ app_local_get
+ int 7
+ ==
+ assert
+ int 0
+ byte "X"
+ app_local_del
+ int 1
+`
+ logic.TestApp(t, source, ep)
+ if ep.Proto.LogicSigVersion >= 4 {
+ // confirm "txn Sender" also works
+ source = strings.ReplaceAll(source, "int 0\n", "txn Sender\n")
+ logic.TestApp(t, source, ep)
+ }
+
+ logic.TestApp(t, "int 0; int 0; app_opted_in", ep)
+ })
+}
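
A condensed sketch of the v9 group resource-sharing rule the tests above exercise, reusing the harness names (payToArg, ledger, t) from the surrounding test rather than standing alone:

	sharer := basics.Address{9, 9, 9, 9}
	ledger.NewAccount(sharer, 1_000_000)
	pay := txntest.Txn{Type: protocol.PaymentTx, Sender: sharer, Receiver: sharer}
	appl := txntest.Txn{
		Type:            protocol.ApplicationCallTx,
		Sender:          sharer,
		ApplicationArgs: [][]byte{sharer[:]},
	}
	// at v9, the pay's Sender is shared with the whole group, so the app
	// program may pay it without it appearing in appl.Accounts
	logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 9, ledger)
	// at v8 the same group fails, since version 8 does not get sharing
	logic.TestApps(t, []string{"", payToArg}, txntest.Group(&pay, &appl), 8, ledger,
		logic.Exp(1, "invalid Account reference "+sharer.String()))
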
diff --git a/data/transactions/logic/sourcemap.go b/data/transactions/logic/sourcemap.go
index 3580a120e..6bc516749 100644
--- a/data/transactions/logic/sourcemap.go
+++ b/data/transactions/logic/sourcemap.go
@@ -27,7 +27,7 @@ const sourceMapVersion = 3
const b64table string = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
// SourceMap contains details from the source to assembly process.
-// Currently contains the map between TEAL source line to
+// Currently, contains the mapping from TEAL source line to
// the assembled bytecode position and details about
// the template variables contained in the source file.
type SourceMap struct {
diff --git a/data/transactions/logic/tracer.go b/data/transactions/logic/tracer.go
index 89802b1c0..4b4c5f580 100644
--- a/data/transactions/logic/tracer.go
+++ b/data/transactions/logic/tracer.go
@@ -16,7 +16,11 @@
package logic
-import "github.com/algorand/go-algorand/data/transactions"
+import (
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+)
// EvalTracer functions are called by eval function during AVM program execution, if a tracer
// is provided.
@@ -93,7 +97,28 @@ import "github.com/algorand/go-algorand/data/transactions"
// │ │
// │ > AfterTxnGroup │
// └──────────────────────────────────────────────────────┘
+//
+// Block Lifecycle Graph
+// ┌──────────────────────────────────────────────────────┐
+// │ Block Evaluation │
+// │ ┌────────────────────────────────────────────────┐ │
+// │ │ > BeforeBlock │ │
+// │ │ │ │
+// │ │ ┌──────────────────────────────────────────┐ │ │
+// │ │ │ > Transaction/LogicSig Lifecycle │ │ │
+// │ │ ├──────────────────────────────────────────┤ │ │
+// │ │ │ ┌────────────────────────────────────┐ │ │ │
+// │ │ │ │ ... │ │ │ │
+// │ │ │ └────────────────────────────────────┘ │ │ │
+// │ │ └──────────────────────────────────────────┘ │ │
+// │ ├────────────────────────────────────────────────│ │
+// │ │ > AfterBlock │ │
+// │ └────────────────────────────────────────────────┘ │
+// └──────────────────────────────────────────────────────┘
type EvalTracer interface {
+ // BeforeBlock is called once at the beginning of block evaluation. It is passed the block header.
+ BeforeBlock(hdr *bookkeeping.BlockHeader)
+
// BeforeTxnGroup is called before a transaction group is executed. This includes both top-level
// and inner transaction groups. The argument ep is the EvalParams object for the group; if the
// group is an inner group, this is the EvalParams object for the inner group.
@@ -105,7 +130,10 @@ type EvalTracer interface {
// AfterTxnGroup is called after a transaction group has been executed. This includes both
// top-level and inner transaction groups. The argument ep is the EvalParams object for the
// group; if the group is an inner group, this is the EvalParams object for the inner group.
- AfterTxnGroup(ep *EvalParams, evalError error)
+ // For top-level transaction groups, the deltas argument is the ledgercore.StateDelta changes
+ // that occurred because of this transaction group. For inner transaction groups, this argument
+ // is nil.
+ AfterTxnGroup(ep *EvalParams, deltas *ledgercore.StateDelta, evalError error)
// BeforeTxn is called before a transaction is executed.
//
@@ -130,16 +158,24 @@ type EvalTracer interface {
// AfterOpcode is called after the op has been evaluated
AfterOpcode(cx *EvalContext, evalError error)
+
+ // AfterBlock is called after the block has finished evaluation. It will not be called in the event that an evalError
+ // stops evaluation of the block.
+ AfterBlock(hdr *bookkeeping.BlockHeader)
}
// NullEvalTracer implements EvalTracer, but all of its hook methods do nothing
type NullEvalTracer struct{}
+// BeforeBlock does nothing
+func (n NullEvalTracer) BeforeBlock(hdr *bookkeeping.BlockHeader) {}
+
// BeforeTxnGroup does nothing
func (n NullEvalTracer) BeforeTxnGroup(ep *EvalParams) {}
// AfterTxnGroup does nothing
-func (n NullEvalTracer) AfterTxnGroup(ep *EvalParams, evalError error) {}
+func (n NullEvalTracer) AfterTxnGroup(ep *EvalParams, deltas *ledgercore.StateDelta, evalError error) {
+}
// BeforeTxn does nothing
func (n NullEvalTracer) BeforeTxn(ep *EvalParams, groupIndex int) {}
@@ -159,3 +195,6 @@ func (n NullEvalTracer) BeforeOpcode(cx *EvalContext) {}
// AfterOpcode does nothing
func (n NullEvalTracer) AfterOpcode(cx *EvalContext, evalError error) {}
+
+// AfterBlock does nothing
+func (n NullEvalTracer) AfterBlock(hdr *bookkeeping.BlockHeader) {}
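
For tracer implementations catching up with this interface change, a minimal sketch of a consumer: embedding NullEvalTracer keeps a custom tracer forward-compatible, so only the hooks of interest need overriding. The blockWatcher type and its package are hypothetical names, not part of this change.

	package tracersketch

	import (
		"github.com/algorand/go-algorand/data/basics"
		"github.com/algorand/go-algorand/data/bookkeeping"
		"github.com/algorand/go-algorand/data/transactions/logic"
		"github.com/algorand/go-algorand/ledger/ledgercore"
	)

	// blockWatcher records block boundaries and top-level group deltas.
	type blockWatcher struct {
		logic.NullEvalTracer // no-op defaults for all other hooks
		rounds []basics.Round
	}

	// BeforeBlock runs once per block, before any transaction group.
	func (b *blockWatcher) BeforeBlock(hdr *bookkeeping.BlockHeader) {
		b.rounds = append(b.rounds, hdr.Round)
	}

	// AfterTxnGroup now receives the group's StateDelta; deltas is nil for
	// inner groups, per the interface contract above.
	func (b *blockWatcher) AfterTxnGroup(ep *logic.EvalParams, deltas *ledgercore.StateDelta, evalError error) {
		if deltas != nil {
			_ = deltas.Accts // inspect top-level account changes here
		}
	}
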
diff --git a/data/transactions/msgp_gen.go b/data/transactions/msgp_gen.go
index 641e541c7..053330615 100644
--- a/data/transactions/msgp_gen.go
+++ b/data/transactions/msgp_gen.go
@@ -1875,33 +1875,37 @@ func (z *BoxRef) MsgIsZero() bool {
func (z *EvalDelta) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0005Len := uint32(4)
- var zb0005Mask uint8 /* 5 bits */
+ zb0006Len := uint32(5)
+ var zb0006Mask uint8 /* 6 bits */
if (*z).GlobalDelta.MsgIsZero() {
- zb0005Len--
- zb0005Mask |= 0x2
+ zb0006Len--
+ zb0006Mask |= 0x2
}
if len((*z).InnerTxns) == 0 {
- zb0005Len--
- zb0005Mask |= 0x4
+ zb0006Len--
+ zb0006Mask |= 0x4
}
if len((*z).LocalDeltas) == 0 {
- zb0005Len--
- zb0005Mask |= 0x8
+ zb0006Len--
+ zb0006Mask |= 0x8
}
if len((*z).Logs) == 0 {
- zb0005Len--
- zb0005Mask |= 0x10
+ zb0006Len--
+ zb0006Mask |= 0x10
+ }
+ if len((*z).SharedAccts) == 0 {
+ zb0006Len--
+ zb0006Mask |= 0x20
}
- // variable map header, size zb0005Len
- o = append(o, 0x80|uint8(zb0005Len))
- if zb0005Len != 0 {
- if (zb0005Mask & 0x2) == 0 { // if not empty
+ // variable map header, size zb0006Len
+ o = append(o, 0x80|uint8(zb0006Len))
+ if zb0006Len != 0 {
+ if (zb0006Mask & 0x2) == 0 { // if not empty
// string "gd"
o = append(o, 0xa2, 0x67, 0x64)
o = (*z).GlobalDelta.MarshalMsg(o)
}
- if (zb0005Mask & 0x4) == 0 { // if not empty
+ if (zb0006Mask & 0x4) == 0 { // if not empty
// string "itx"
o = append(o, 0xa3, 0x69, 0x74, 0x78)
if (*z).InnerTxns == nil {
@@ -1909,11 +1913,11 @@ func (z *EvalDelta) MarshalMsg(b []byte) (o []byte) {
} else {
o = msgp.AppendArrayHeader(o, uint32(len((*z).InnerTxns)))
}
- for zb0004 := range (*z).InnerTxns {
- o = (*z).InnerTxns[zb0004].MarshalMsg(o)
+ for zb0005 := range (*z).InnerTxns {
+ o = (*z).InnerTxns[zb0005].MarshalMsg(o)
}
}
- if (zb0005Mask & 0x8) == 0 { // if not empty
+ if (zb0006Mask & 0x8) == 0 { // if not empty
// string "ld"
o = append(o, 0xa2, 0x6c, 0x64)
if (*z).LocalDeltas == nil {
@@ -1933,7 +1937,7 @@ func (z *EvalDelta) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
- if (zb0005Mask & 0x10) == 0 { // if not empty
+ if (zb0006Mask & 0x10) == 0 { // if not empty
// string "lg"
o = append(o, 0xa2, 0x6c, 0x67)
if (*z).Logs == nil {
@@ -1941,8 +1945,20 @@ func (z *EvalDelta) MarshalMsg(b []byte) (o []byte) {
} else {
o = msgp.AppendArrayHeader(o, uint32(len((*z).Logs)))
}
- for zb0003 := range (*z).Logs {
- o = msgp.AppendString(o, (*z).Logs[zb0003])
+ for zb0004 := range (*z).Logs {
+ o = msgp.AppendString(o, (*z).Logs[zb0004])
+ }
+ }
+ if (zb0006Mask & 0x20) == 0 { // if not empty
+ // string "sa"
+ o = append(o, 0xa2, 0x73, 0x61)
+ if (*z).SharedAccts == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).SharedAccts)))
+ }
+ for zb0003 := range (*z).SharedAccts {
+ o = (*z).SharedAccts[zb0003].MarshalMsg(o)
}
}
}
@@ -1958,46 +1974,46 @@ func (_ *EvalDelta) CanMarshalMsg(z interface{}) bool {
func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
if _, ok := err.(msgp.TypeError); ok {
- zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
- if zb0005 > 0 {
- zb0005--
+ if zb0006 > 0 {
+ zb0006--
bts, err = (*z).GlobalDelta.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "GlobalDelta")
return
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0007 int
- var zb0008 bool
- zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0008 int
+ var zb0009 bool
+ zb0008, zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LocalDeltas")
return
}
- if zb0007 > config.MaxEvalDeltaAccounts {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(config.MaxEvalDeltaAccounts))
+ if zb0008 > config.MaxEvalDeltaAccounts {
+ err = msgp.ErrOverflow(uint64(zb0008), uint64(config.MaxEvalDeltaAccounts))
err = msgp.WrapError(err, "struct-from-array", "LocalDeltas")
return
}
- if zb0008 {
+ if zb0009 {
(*z).LocalDeltas = nil
} else if (*z).LocalDeltas == nil {
- (*z).LocalDeltas = make(map[uint64]basics.StateDelta, zb0007)
+ (*z).LocalDeltas = make(map[uint64]basics.StateDelta, zb0008)
}
- for zb0007 > 0 {
+ for zb0008 > 0 {
var zb0001 uint64
var zb0002 basics.StateDelta
- zb0007--
+ zb0008--
zb0001, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "LocalDeltas")
@@ -2011,66 +2027,95 @@ func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
(*z).LocalDeltas[zb0001] = zb0002
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0009 int
- var zb0010 bool
- zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0010 int
+ var zb0011 bool
+ zb0010, zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SharedAccts")
+ return
+ }
+ if zb0010 > config.MaxEvalDeltaAccounts {
+ err = msgp.ErrOverflow(uint64(zb0010), uint64(config.MaxEvalDeltaAccounts))
+ err = msgp.WrapError(err, "struct-from-array", "SharedAccts")
+ return
+ }
+ if zb0011 {
+ (*z).SharedAccts = nil
+ } else if (*z).SharedAccts != nil && cap((*z).SharedAccts) >= zb0010 {
+ (*z).SharedAccts = ((*z).SharedAccts)[:zb0010]
+ } else {
+ (*z).SharedAccts = make([]basics.Address, zb0010)
+ }
+ for zb0003 := range (*z).SharedAccts {
+ bts, err = (*z).SharedAccts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SharedAccts", zb0003)
+ return
+ }
+ }
+ }
+ if zb0006 > 0 {
+ zb0006--
+ var zb0012 int
+ var zb0013 bool
+ zb0012, zb0013, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "Logs")
return
}
- if zb0009 > config.MaxLogCalls {
- err = msgp.ErrOverflow(uint64(zb0009), uint64(config.MaxLogCalls))
+ if zb0012 > config.MaxLogCalls {
+ err = msgp.ErrOverflow(uint64(zb0012), uint64(config.MaxLogCalls))
err = msgp.WrapError(err, "struct-from-array", "Logs")
return
}
- if zb0010 {
+ if zb0013 {
(*z).Logs = nil
- } else if (*z).Logs != nil && cap((*z).Logs) >= zb0009 {
- (*z).Logs = ((*z).Logs)[:zb0009]
+ } else if (*z).Logs != nil && cap((*z).Logs) >= zb0012 {
+ (*z).Logs = ((*z).Logs)[:zb0012]
} else {
- (*z).Logs = make([]string, zb0009)
+ (*z).Logs = make([]string, zb0012)
}
- for zb0003 := range (*z).Logs {
- (*z).Logs[zb0003], bts, err = msgp.ReadStringBytes(bts)
+ for zb0004 := range (*z).Logs {
+ (*z).Logs[zb0004], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Logs", zb0003)
+ err = msgp.WrapError(err, "struct-from-array", "Logs", zb0004)
return
}
}
}
- if zb0005 > 0 {
- zb0005--
- var zb0011 int
- var zb0012 bool
- zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if zb0006 > 0 {
+ zb0006--
+ var zb0014 int
+ var zb0015 bool
+ zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "InnerTxns")
return
}
- if zb0011 > config.MaxInnerTransactionsPerDelta {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(config.MaxInnerTransactionsPerDelta))
+ if zb0014 > config.MaxInnerTransactionsPerDelta {
+ err = msgp.ErrOverflow(uint64(zb0014), uint64(config.MaxInnerTransactionsPerDelta))
err = msgp.WrapError(err, "struct-from-array", "InnerTxns")
return
}
- if zb0012 {
+ if zb0015 {
(*z).InnerTxns = nil
- } else if (*z).InnerTxns != nil && cap((*z).InnerTxns) >= zb0011 {
- (*z).InnerTxns = ((*z).InnerTxns)[:zb0011]
+ } else if (*z).InnerTxns != nil && cap((*z).InnerTxns) >= zb0014 {
+ (*z).InnerTxns = ((*z).InnerTxns)[:zb0014]
} else {
- (*z).InnerTxns = make([]SignedTxnWithAD, zb0011)
+ (*z).InnerTxns = make([]SignedTxnWithAD, zb0014)
}
- for zb0004 := range (*z).InnerTxns {
- bts, err = (*z).InnerTxns[zb0004].UnmarshalMsg(bts)
+ for zb0005 := range (*z).InnerTxns {
+ bts, err = (*z).InnerTxns[zb0005].UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "InnerTxns", zb0004)
+ err = msgp.WrapError(err, "struct-from-array", "InnerTxns", zb0005)
return
}
}
}
- if zb0005 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0005)
+ if zb0006 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0006)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
return
@@ -2081,11 +2126,11 @@ func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
- if zb0006 {
+ if zb0007 {
(*z) = EvalDelta{}
}
- for zb0005 > 0 {
- zb0005--
+ for zb0006 > 0 {
+ zb0006--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
@@ -2099,27 +2144,27 @@ func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "ld":
- var zb0013 int
- var zb0014 bool
- zb0013, zb0014, bts, err = msgp.ReadMapHeaderBytes(bts)
+ var zb0016 int
+ var zb0017 bool
+ zb0016, zb0017, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LocalDeltas")
return
}
- if zb0013 > config.MaxEvalDeltaAccounts {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(config.MaxEvalDeltaAccounts))
+ if zb0016 > config.MaxEvalDeltaAccounts {
+ err = msgp.ErrOverflow(uint64(zb0016), uint64(config.MaxEvalDeltaAccounts))
err = msgp.WrapError(err, "LocalDeltas")
return
}
- if zb0014 {
+ if zb0017 {
(*z).LocalDeltas = nil
} else if (*z).LocalDeltas == nil {
- (*z).LocalDeltas = make(map[uint64]basics.StateDelta, zb0013)
+ (*z).LocalDeltas = make(map[uint64]basics.StateDelta, zb0016)
}
- for zb0013 > 0 {
+ for zb0016 > 0 {
var zb0001 uint64
var zb0002 basics.StateDelta
- zb0013--
+ zb0016--
zb0001, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LocalDeltas")
@@ -2132,57 +2177,84 @@ func (z *EvalDelta) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
(*z).LocalDeltas[zb0001] = zb0002
}
+ case "sa":
+ var zb0018 int
+ var zb0019 bool
+ zb0018, zb0019, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SharedAccts")
+ return
+ }
+ if zb0018 > config.MaxEvalDeltaAccounts {
+ err = msgp.ErrOverflow(uint64(zb0018), uint64(config.MaxEvalDeltaAccounts))
+ err = msgp.WrapError(err, "SharedAccts")
+ return
+ }
+ if zb0019 {
+ (*z).SharedAccts = nil
+ } else if (*z).SharedAccts != nil && cap((*z).SharedAccts) >= zb0018 {
+ (*z).SharedAccts = ((*z).SharedAccts)[:zb0018]
+ } else {
+ (*z).SharedAccts = make([]basics.Address, zb0018)
+ }
+ for zb0003 := range (*z).SharedAccts {
+ bts, err = (*z).SharedAccts[zb0003].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SharedAccts", zb0003)
+ return
+ }
+ }
case "lg":
- var zb0015 int
- var zb0016 bool
- zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0020 int
+ var zb0021 bool
+ zb0020, zb0021, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Logs")
return
}
- if zb0015 > config.MaxLogCalls {
- err = msgp.ErrOverflow(uint64(zb0015), uint64(config.MaxLogCalls))
+ if zb0020 > config.MaxLogCalls {
+ err = msgp.ErrOverflow(uint64(zb0020), uint64(config.MaxLogCalls))
err = msgp.WrapError(err, "Logs")
return
}
- if zb0016 {
+ if zb0021 {
(*z).Logs = nil
- } else if (*z).Logs != nil && cap((*z).Logs) >= zb0015 {
- (*z).Logs = ((*z).Logs)[:zb0015]
+ } else if (*z).Logs != nil && cap((*z).Logs) >= zb0020 {
+ (*z).Logs = ((*z).Logs)[:zb0020]
} else {
- (*z).Logs = make([]string, zb0015)
+ (*z).Logs = make([]string, zb0020)
}
- for zb0003 := range (*z).Logs {
- (*z).Logs[zb0003], bts, err = msgp.ReadStringBytes(bts)
+ for zb0004 := range (*z).Logs {
+ (*z).Logs[zb0004], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "Logs", zb0003)
+ err = msgp.WrapError(err, "Logs", zb0004)
return
}
}
case "itx":
- var zb0017 int
- var zb0018 bool
- zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0022 int
+ var zb0023 bool
+ zb0022, zb0023, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "InnerTxns")
return
}
- if zb0017 > config.MaxInnerTransactionsPerDelta {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(config.MaxInnerTransactionsPerDelta))
+ if zb0022 > config.MaxInnerTransactionsPerDelta {
+ err = msgp.ErrOverflow(uint64(zb0022), uint64(config.MaxInnerTransactionsPerDelta))
err = msgp.WrapError(err, "InnerTxns")
return
}
- if zb0018 {
+ if zb0023 {
(*z).InnerTxns = nil
- } else if (*z).InnerTxns != nil && cap((*z).InnerTxns) >= zb0017 {
- (*z).InnerTxns = ((*z).InnerTxns)[:zb0017]
+ } else if (*z).InnerTxns != nil && cap((*z).InnerTxns) >= zb0022 {
+ (*z).InnerTxns = ((*z).InnerTxns)[:zb0022]
} else {
- (*z).InnerTxns = make([]SignedTxnWithAD, zb0017)
+ (*z).InnerTxns = make([]SignedTxnWithAD, zb0022)
}
- for zb0004 := range (*z).InnerTxns {
- bts, err = (*z).InnerTxns[zb0004].UnmarshalMsg(bts)
+ for zb0005 := range (*z).InnerTxns {
+ bts, err = (*z).InnerTxns[zb0005].UnmarshalMsg(bts)
if err != nil {
- err = msgp.WrapError(err, "InnerTxns", zb0004)
+ err = msgp.WrapError(err, "InnerTxns", zb0005)
return
}
}
@@ -2215,19 +2287,23 @@ func (z *EvalDelta) Msgsize() (s int) {
}
}
s += 3 + msgp.ArrayHeaderSize
- for zb0003 := range (*z).Logs {
- s += msgp.StringPrefixSize + len((*z).Logs[zb0003])
+ for zb0003 := range (*z).SharedAccts {
+ s += (*z).SharedAccts[zb0003].Msgsize()
+ }
+ s += 3 + msgp.ArrayHeaderSize
+ for zb0004 := range (*z).Logs {
+ s += msgp.StringPrefixSize + len((*z).Logs[zb0004])
}
s += 4 + msgp.ArrayHeaderSize
- for zb0004 := range (*z).InnerTxns {
- s += (*z).InnerTxns[zb0004].Msgsize()
+ for zb0005 := range (*z).InnerTxns {
+ s += (*z).InnerTxns[zb0005].Msgsize()
}
return
}
// MsgIsZero returns whether this is a zero value
func (z *EvalDelta) MsgIsZero() bool {
- return ((*z).GlobalDelta.MsgIsZero()) && (len((*z).LocalDeltas) == 0) && (len((*z).Logs) == 0) && (len((*z).InnerTxns) == 0)
+ return ((*z).GlobalDelta.MsgIsZero()) && (len((*z).LocalDeltas) == 0) && (len((*z).SharedAccts) == 0) && (len((*z).Logs) == 0) && (len((*z).InnerTxns) == 0)
}
// MarshalMsg implements msgp.Marshaler
diff --git a/data/transactions/teal.go b/data/transactions/teal.go
index 9472fd3b9..37388a0d9 100644
--- a/data/transactions/teal.go
+++ b/data/transactions/teal.go
@@ -32,9 +32,14 @@ type EvalDelta struct {
GlobalDelta basics.StateDelta `codec:"gd"`
// When decoding EvalDeltas, the integer key represents an offset into
- // [txn.Sender, txn.Accounts[0], txn.Accounts[1], ...]
+ // [txn.Sender, txn.Accounts[0], txn.Accounts[1], ..., SharedAccts[0], SharedAccts[1], ...]
LocalDeltas map[uint64]basics.StateDelta `codec:"ld,allocbound=config.MaxEvalDeltaAccounts"`
+	// If a program modifies the local state of an account that is neither the
+	// Sender nor in txn.Accounts, the account must be recorded here so that
+	// the key in LocalDeltas can refer to it.
+ SharedAccts []basics.Address `codec:"sa,allocbound=config.MaxEvalDeltaAccounts"`
+
Logs []string `codec:"lg,allocbound=config.MaxLogCalls"`
InnerTxns []SignedTxnWithAD `codec:"itx,allocbound=config.MaxInnerTransactionsPerDelta"`
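
A sketch of how a decoder resolves a LocalDeltas key under the extended offset convention documented above (the helper name is hypothetical):

	func localDeltaAddress(txn *Transaction, ed *EvalDelta, key uint64) (basics.Address, bool) {
		nAccts := uint64(len(txn.Accounts))
		switch {
		case key == 0:
			return txn.Sender, true
		case key <= nAccts:
			return txn.Accounts[key-1], true // Accounts[0] sits at key 1
		case key <= nAccts+uint64(len(ed.SharedAccts)):
			return ed.SharedAccts[key-1-nAccts], true
		default:
			return basics.Address{}, false
		}
	}
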
diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go
index 6c1b56fd2..5a397dc7c 100644
--- a/data/transactions/transaction.go
+++ b/data/transactions/transaction.go
@@ -118,7 +118,7 @@ type ApplyData struct {
// If asa or app is being created, the id used. Else 0.
// Names chosen to match naming the corresponding txn.
- // These are populated on when MaxInnerTransactions > 0 (TEAL 5)
+ // These are populated only when MaxInnerTransactions > 0 (TEAL 5)
ConfigAsset basics.AssetIndex `codec:"caid"`
ApplicationID basics.AppIndex `codec:"apid"`
}
@@ -477,6 +477,9 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa
if br.Index > uint64(len(tx.ForeignApps)) {
return fmt.Errorf("tx.Boxes[%d].Index is %d. Exceeds len(tx.ForeignApps)", i, br.Index)
}
+ if proto.EnableBoxRefNameError && len(br.Name) > proto.MaxAppKeyLen {
+ return fmt.Errorf("tx.Boxes[%d].Name too long, max len %d bytes", i, proto.MaxAppKeyLen)
+ }
}
if tx.LocalStateSchema.NumEntries() > proto.MaxLocalSchemaEntries {
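
A sketch of a transaction tripping the new check; the values mirror the test added below (MaxAppKeyLen is 64, and the check is gated on EnableBoxRefNameError). Header fields are elided here; the test uses a fully populated okHeader so the box-name error is the one reported:

	tx := Transaction{
		Type: protocol.ApplicationCallTx,
		ApplicationCallTxnFields: ApplicationCallTxnFields{
			ApplicationID: 1,
			Boxes:         []BoxRef{{Index: 0, Name: make([]byte, 65)}},
		},
	}
	err := tx.WellFormed(SpecialAddresses{}, config.Consensus[protocol.ConsensusFuture])
	// expect: "tx.Boxes[0].Name too long, max len 64 bytes"
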
diff --git a/data/transactions/transaction_test.go b/data/transactions/transaction_test.go
index 51f69ce71..0f3cf1825 100644
--- a/data/transactions/transaction_test.go
+++ b/data/transactions/transaction_test.go
@@ -274,6 +274,7 @@ func TestWellFormedErrors(t *testing.T) {
protoV27 := config.Consensus[protocol.ConsensusV27]
protoV28 := config.Consensus[protocol.ConsensusV28]
protoV32 := config.Consensus[protocol.ConsensusV32]
+ protoV36 := config.Consensus[protocol.ConsensusV36]
addr1, err := basics.UnmarshalChecksumAddress("NDQCJNNY5WWWFLP4GFZ7MEF2QJSMZYK6OWIV2AQ7OMAVLEFCGGRHFPKJJA")
require.NoError(t, err)
v5 := []byte{0x05}
@@ -566,6 +567,32 @@ func TestWellFormedErrors(t *testing.T) {
proto: protoV32,
expectedError: fmt.Errorf("tx.Boxes too long, max number of box references is 0"),
},
+ {
+ tx: Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: okHeader,
+ ApplicationCallTxnFields: ApplicationCallTxnFields{
+ ApplicationID: 1,
+ Boxes: []BoxRef{{Index: 1, Name: make([]byte, 65)}},
+ ForeignApps: []basics.AppIndex{1},
+ },
+ },
+ proto: futureProto,
+ expectedError: fmt.Errorf("tx.Boxes[0].Name too long, max len 64 bytes"),
+ },
+ {
+ tx: Transaction{
+ Type: protocol.ApplicationCallTx,
+ Header: okHeader,
+ ApplicationCallTxnFields: ApplicationCallTxnFields{
+ ApplicationID: 1,
+ Boxes: []BoxRef{{Index: 1, Name: make([]byte, 65)}},
+ ForeignApps: []basics.AppIndex{1},
+ },
+ },
+ proto: protoV36,
+ expectedError: nil,
+ },
}
for _, usecase := range usecases {
err := usecase.tx.WellFormed(specialAddr, usecase.proto)
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index 1d3605521..528d5ea5e 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -108,8 +108,11 @@ const (
// TxGroupError is an error from txn pre-validation (well form-ness, signature format, etc).
// It can be unwrapped into underlying error, as well as has a specific failure reason code.
type TxGroupError struct {
- err error
- Reason TxGroupErrorReason
+ err error
+ // GroupIndex is the index of the transaction in the group that failed. NOTE: this will be -1 if
+ // the error is not specific to a single transaction.
+ GroupIndex int
+ Reason TxGroupErrorReason
}
// Error returns an error message from the underlying error
@@ -155,16 +158,16 @@ func (g *GroupContext) Equal(other *GroupContext) bool {
// txnBatchPrep verifies a SignedTxn having no obviously inconsistent data.
// Block-assembly time checks of LogicSig and accounting rules may still block the txn.
// It is the caller responsibility to call batchVerifier.Verify().
-func txnBatchPrep(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, verifier *crypto.BatchVerifier, evalTracer logic.EvalTracer) *TxGroupError {
+func txnBatchPrep(s *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, verifier *crypto.BatchVerifier, evalTracer logic.EvalTracer) *TxGroupError {
if !groupCtx.consensusParams.SupportRekeying && (s.AuthAddr != basics.Address{}) {
- return &TxGroupError{err: errRekeyingNotSupported, Reason: TxGroupErrorReasonGeneric}
+ return &TxGroupError{err: errRekeyingNotSupported, GroupIndex: groupIndex, Reason: TxGroupErrorReasonGeneric}
}
if err := s.Txn.WellFormed(groupCtx.specAddrs, groupCtx.consensusParams); err != nil {
- return &TxGroupError{err: err, Reason: TxGroupErrorReasonNotWellFormed}
+ return &TxGroupError{err: err, GroupIndex: groupIndex, Reason: TxGroupErrorReasonNotWellFormed}
}
- return stxnCoreChecks(s, txnIdx, groupCtx, verifier, evalTracer)
+ return stxnCoreChecks(s, groupIndex, groupCtx, verifier, evalTracer)
}
// TxnGroup verifies a []SignedTxn as being signed and having no obviously inconsistent data.
@@ -219,7 +222,7 @@ func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr *bookkeeping.Bl
}
feeNeeded, overflow := basics.OMul(groupCtx.consensusParams.MinTxnFee, minFeeCount)
if overflow {
- err = &TxGroupError{err: errTxGroupInvalidFee, Reason: TxGroupErrorReasonInvalidFee}
+ err = &TxGroupError{err: errTxGroupInvalidFee, GroupIndex: -1, Reason: TxGroupErrorReasonInvalidFee}
return nil, err
}
// feesPaid may have saturated. That's ok. Since we know
@@ -230,7 +233,8 @@ func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr *bookkeeping.Bl
err: fmt.Errorf(
"txgroup had %d in fees, which is less than the minimum %d * %d",
feesPaid, minFeeCount, groupCtx.consensusParams.MinTxnFee),
- Reason: TxGroupErrorReasonInvalidFee,
+ GroupIndex: -1,
+ Reason: TxGroupErrorReasonInvalidFee,
}
return nil, err
}
@@ -246,7 +250,7 @@ const logicSig sigOrTxnType = 3
const stateProofTxn sigOrTxnType = 4
// checkTxnSigTypeCounts checks the number of signature types and reports an error in case of a violation
-func checkTxnSigTypeCounts(s *transactions.SignedTxn) (sigType sigOrTxnType, err *TxGroupError) {
+func checkTxnSigTypeCounts(s *transactions.SignedTxn, groupIndex int) (sigType sigOrTxnType, err *TxGroupError) {
numSigCategories := 0
if s.Sig != (crypto.Signature{}) {
numSigCategories++
@@ -268,17 +272,17 @@ func checkTxnSigTypeCounts(s *transactions.SignedTxn) (sigType sigOrTxnType, err
if s.Txn.Sender == transactions.StateProofSender && s.Txn.Type == protocol.StateProofTx {
return stateProofTxn, nil
}
- return 0, &TxGroupError{err: errTxnSigHasNoSig, Reason: TxGroupErrorReasonHasNoSig}
+ return 0, &TxGroupError{err: errTxnSigHasNoSig, GroupIndex: groupIndex, Reason: TxGroupErrorReasonHasNoSig}
}
if numSigCategories > 1 {
- return 0, &TxGroupError{err: errTxnSigNotWellFormed, Reason: TxGroupErrorReasonSigNotWellFormed}
+ return 0, &TxGroupError{err: errTxnSigNotWellFormed, GroupIndex: groupIndex, Reason: TxGroupErrorReasonSigNotWellFormed}
}
return sigType, nil
}
// stxnCoreChecks runs signatures validity checks and enqueues signature into batchVerifier for verification.
-func stxnCoreChecks(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier, evalTracer logic.EvalTracer) *TxGroupError {
- sigType, err := checkTxnSigTypeCounts(s)
+func stxnCoreChecks(s *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, batchVerifier *crypto.BatchVerifier, evalTracer logic.EvalTracer) *TxGroupError {
+ sigType, err := checkTxnSigTypeCounts(s, groupIndex)
if err != nil {
return err
}
@@ -289,7 +293,7 @@ func stxnCoreChecks(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
return nil
case multiSig:
if err := crypto.MultisigBatchPrep(s.Txn, crypto.Digest(s.Authorizer()), s.Msig, batchVerifier); err != nil {
- return &TxGroupError{err: fmt.Errorf("multisig validation failed: %w", err), Reason: TxGroupErrorReasonMsigNotWellFormed}
+ return &TxGroupError{err: fmt.Errorf("multisig validation failed: %w", err), GroupIndex: groupIndex, Reason: TxGroupErrorReasonMsigNotWellFormed}
}
counter := 0
for _, subsigi := range s.Msig.Subsigs {
@@ -307,8 +311,8 @@ func stxnCoreChecks(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
return nil
case logicSig:
- if err := logicSigVerify(s, txnIdx, groupCtx, evalTracer); err != nil {
- return &TxGroupError{err: err, Reason: TxGroupErrorReasonLogicSigFailed}
+ if err := logicSigVerify(s, groupIndex, groupCtx, evalTracer); err != nil {
+ return &TxGroupError{err: err, GroupIndex: groupIndex, Reason: TxGroupErrorReasonLogicSigFailed}
}
return nil
@@ -316,7 +320,7 @@ func stxnCoreChecks(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContex
return nil
default:
- return &TxGroupError{err: errUnknownSignature, Reason: TxGroupErrorReasonGeneric}
+ return &TxGroupError{err: errUnknownSignature, GroupIndex: groupIndex, Reason: TxGroupErrorReasonGeneric}
}
}
@@ -416,20 +420,6 @@ func logicSigSanityCheckBatchPrep(txn *transactions.SignedTxn, groupIndex int, g
return nil
}
-// LogicSigError represents a LogicSig evaluation which rejected or errored
-type LogicSigError struct {
- GroupIndex int
- err error
-}
-
-func (e LogicSigError) Error() string {
- return e.err.Error()
-}
-
-func (e LogicSigError) Unwrap() error {
- return e.err
-}
-
// logicSigVerify checks that the signature is valid, executing the program.
func logicSigVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *GroupContext, evalTracer logic.EvalTracer) error {
err := LogicSigSanityCheck(txn, groupIndex, groupCtx)
@@ -450,11 +440,11 @@ func logicSigVerify(txn *transactions.SignedTxn, groupIndex int, groupCtx *Group
pass, cx, err := logic.EvalSignatureFull(groupIndex, &ep)
if err != nil {
logicErrTotal.Inc(nil)
- return LogicSigError{groupIndex, fmt.Errorf("transaction %v: rejected by logic err=%w", txn.ID(), err)}
+ return fmt.Errorf("transaction %v: %w", txn.ID(), err)
}
if !pass {
logicRejTotal.Inc(nil)
- return LogicSigError{groupIndex, fmt.Errorf("transaction %v: rejected by logic", txn.ID())}
+ return fmt.Errorf("transaction %v: rejected by logic", txn.ID())
}
logicGoodTotal.Inc(nil)
logicCostTotal.AddUint64(uint64(cx.Cost()), nil)
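
With LogicSigError removed, the failing transaction's position now travels on TxGroupError.GroupIndex instead (-1 when the error is not specific to one transaction). A caller-side sketch, assuming the standard errors and fmt imports; reportVerifyFailure is a hypothetical helper:

	func reportVerifyFailure(err error) {
		var gerr *TxGroupError
		if errors.As(err, &gerr) && gerr.GroupIndex >= 0 {
			fmt.Printf("txn %d failed verification: %v\n", gerr.GroupIndex, gerr)
		}
	}
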
diff --git a/data/transactions/verify/txnBatch.go b/data/transactions/verify/txnBatch.go
index 206ee221f..40bd2dfff 100644
--- a/data/transactions/verify/txnBatch.go
+++ b/data/transactions/verify/txnBatch.go
@@ -26,6 +26,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/util/execpool"
)
// UnverifiedTxnSigJob is the sig verification job passed to the Stream verifier
@@ -113,7 +114,7 @@ type LedgerForStreamVerifier interface {
BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error)
}
-func (tbp *txnSigBatchProcessor) Cleanup(pending []InputJob, err error) {
+func (tbp *txnSigBatchProcessor) Cleanup(pending []execpool.InputJob, err error) {
// report an error for the unchecked txns
// drop the messages without reporting if the receiver does not consume
for i := range pending {
@@ -122,7 +123,7 @@ func (tbp *txnSigBatchProcessor) Cleanup(pending []InputJob, err error) {
}
}
-func (tbp txnSigBatchProcessor) GetErredUnprocessed(ue InputJob, err error) {
+func (tbp txnSigBatchProcessor) GetErredUnprocessed(ue execpool.InputJob, err error) {
uelt := ue.(*UnverifiedTxnSigJob)
tbp.sendResult(uelt.TxnGroup, uelt.BacklogMessage, err)
}
@@ -143,7 +144,7 @@ func (tbp txnSigBatchProcessor) sendResult(veTxnGroup []transactions.SignedTxn,
// MakeSigVerifyJobProcessor returns the object implementing the stream verifier Helper interface
func MakeSigVerifyJobProcessor(ledger LedgerForStreamVerifier, cache VerifiedTransactionCache,
- resultChan chan<- *VerificationResult, droppedChan chan<- *UnverifiedTxnSigJob) (svp BatchProcessor, err error) {
+ resultChan chan<- *VerificationResult, droppedChan chan<- *UnverifiedTxnSigJob) (svp execpool.BatchProcessor, err error) {
latest := ledger.Latest()
latestHdr, err := ledger.BlockHdr(latest)
if err != nil {
@@ -162,14 +163,14 @@ func MakeSigVerifyJobProcessor(ledger LedgerForStreamVerifier, cache VerifiedTra
}, nil
}
-func (tbp *txnSigBatchProcessor) ProcessBatch(txns []InputJob) {
+func (tbp *txnSigBatchProcessor) ProcessBatch(txns []execpool.InputJob) {
batchVerifier, ctx := tbp.preProcessUnverifiedTxns(txns)
failed, err := batchVerifier.VerifyWithFeedback()
// this error can only be crypto.ErrBatchHasFailedSigs
tbp.postProcessVerifiedJobs(ctx, failed, err)
}
-func (tbp *txnSigBatchProcessor) preProcessUnverifiedTxns(uTxns []InputJob) (batchVerifier *crypto.BatchVerifier, ctx interface{}) {
+func (tbp *txnSigBatchProcessor) preProcessUnverifiedTxns(uTxns []execpool.InputJob) (batchVerifier *crypto.BatchVerifier, ctx interface{}) {
batchVerifier = crypto.MakeBatchVerifier()
bl := makeBatchLoad(len(uTxns))
// TODO: separate operations here, and get the sig verification inside the LogicSig to the batch here
@@ -193,7 +194,7 @@ func (tbp *txnSigBatchProcessor) preProcessUnverifiedTxns(uTxns []InputJob) (bat
func (ue UnverifiedTxnSigJob) GetNumberOfBatchableItems() (batchSigs uint64, err error) {
batchSigs = 0
for i := range ue.TxnGroup {
- count, err := getNumberOfBatchableSigsInTxn(&ue.TxnGroup[i])
+ count, err := getNumberOfBatchableSigsInTxn(&ue.TxnGroup[i], i)
if err != nil {
return 0, err
}
@@ -202,8 +203,8 @@ func (ue UnverifiedTxnSigJob) GetNumberOfBatchableItems() (batchSigs uint64, err
return
}
-func getNumberOfBatchableSigsInTxn(stx *transactions.SignedTxn) (uint64, error) {
- sigType, err := checkTxnSigTypeCounts(stx)
+func getNumberOfBatchableSigsInTxn(stx *transactions.SignedTxn, groupIndex int) (uint64, error) {
+ sigType, err := checkTxnSigTypeCounts(stx, groupIndex)
if err != nil {
return 0, err
}
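
The stream verifier itself now lives in util/execpool. A minimal wiring sketch inside some setup function, mirroring the test updates below; the ledger, cache, pool, and ctx values are assumed to exist:

	inputChan := make(chan execpool.InputJob)
	resultChan := make(chan *verify.VerificationResult, 1000)
	droppedChan := make(chan *verify.UnverifiedTxnSigJob)
	proc, err := verify.MakeSigVerifyJobProcessor(ledger, cache, resultChan, droppedChan)
	if err != nil {
		return err // setup failure
	}
	sv := execpool.MakeStreamToBatch(inputChan, pool, proc)
	sv.Start(ctx)
	defer sv.WaitForStop()
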
diff --git a/data/transactions/verify/txnBatch_test.go b/data/transactions/verify/txnBatch_test.go
index a196a9dcd..aeced948c 100644
--- a/data/transactions/verify/txnBatch_test.go
+++ b/data/transactions/verify/txnBatch_test.go
@@ -46,7 +46,7 @@ import (
var droppedFromPool = metrics.MakeCounter(metrics.MetricName{Name: "test_streamVerifierTestCore_messages_dropped_pool", Description: "Test streamVerifierTestCore messages dropped from pool"})
func streamVerifierTestCore(txnGroups [][]transactions.SignedTxn, badTxnGroups map[uint64]struct{},
- expectedError error, t *testing.T) (sv *StreamToBatch) {
+ expectedError error, t *testing.T) (sv *execpool.StreamToBatch) {
numOfTxnGroups := len(txnGroups)
verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t)
@@ -57,12 +57,12 @@ func streamVerifierTestCore(txnGroups [][]transactions.SignedTxn, badTxnGroups m
defer cancel()
- inputChan := make(chan InputJob)
+ inputChan := make(chan execpool.InputJob)
resultChan := make(chan *VerificationResult, txBacklogSize)
droppedChan := make(chan *UnverifiedTxnSigJob)
ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan)
require.NoError(t, err)
- sv = MakeStreamToBatch(inputChan, verificationPool, ep)
+ sv = execpool.MakeStreamToBatch(inputChan, verificationPool, ep)
sv.Start(ctx)
wg := sync.WaitGroup{}
@@ -408,12 +408,12 @@ func TestStreamToBatchPoolShutdown(t *testing.T) { //nolint:paralleltest // Not
ctx, cancel := context.WithCancel(context.Background())
cache := MakeVerifiedTransactionCache(50000)
- inputChan := make(chan InputJob)
+ inputChan := make(chan execpool.InputJob)
resultChan := make(chan *VerificationResult, txBacklogSize)
droppedChan := make(chan *UnverifiedTxnSigJob)
ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan)
require.NoError(t, err)
- sv := MakeStreamToBatch(inputChan, verificationPool, ep)
+ sv := execpool.MakeStreamToBatch(inputChan, verificationPool, ep)
sv.Start(ctx)
errChan := make(chan error)
@@ -446,7 +446,7 @@ func TestStreamToBatchPoolShutdown(t *testing.T) { //nolint:paralleltest // Not
}
}()
for err := range errChan {
- require.ErrorIs(t, err, ErrShuttingDownError)
+ require.ErrorIs(t, err, execpool.ErrShuttingDownError)
}
require.Contains(t, logBuffer.String(), "addBatchToThePoolNow: EnqueueBacklog returned an error and StreamToBatch will stop: context canceled")
wg.Wait()
@@ -468,14 +468,14 @@ func TestStreamToBatchRestart(t *testing.T) {
cache := MakeVerifiedTransactionCache(50)
- inputChan := make(chan InputJob)
+ inputChan := make(chan execpool.InputJob)
resultChan := make(chan *VerificationResult, txBacklogSize)
droppedChan := make(chan *UnverifiedTxnSigJob)
ctx, cancel := context.WithCancel(context.Background())
ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan)
require.NoError(t, err)
- sv := MakeStreamToBatch(inputChan, verificationPool, ep)
+ sv := execpool.MakeStreamToBatch(inputChan, verificationPool, ep)
sv.Start(ctx)
errChan := make(chan error)
@@ -509,7 +509,7 @@ func TestStreamToBatchRestart(t *testing.T) {
cancel()
}()
for err := range errChan {
- require.ErrorIs(t, err, ErrShuttingDownError)
+ require.ErrorIs(t, err, execpool.ErrShuttingDownError)
}
wg.Wait()
sv.WaitForStop()
@@ -588,12 +588,12 @@ func TestStreamToBatchCtxCancel(t *testing.T) {
defer verificationPool.Shutdown()
ctx, cancel := context.WithCancel(context.Background())
cache := MakeVerifiedTransactionCache(50)
- inputChan := make(chan InputJob)
+ inputChan := make(chan execpool.InputJob)
resultChan := make(chan *VerificationResult, txBacklogSize)
droppedChan := make(chan *UnverifiedTxnSigJob)
ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan)
require.NoError(t, err)
- sv := MakeStreamToBatch(inputChan, verificationPool, ep)
+ sv := execpool.MakeStreamToBatch(inputChan, verificationPool, ep)
sv.Start(ctx)
var result *VerificationResult
@@ -620,7 +620,7 @@ func TestStreamToBatchCtxCancel(t *testing.T) {
close(holdTasks)
wg.Wait()
- require.ErrorIs(t, result.Err, ErrShuttingDownError)
+ require.ErrorIs(t, result.Err, execpool.ErrShuttingDownError)
}
// TestStreamToBatchCtxCancelPoolQueue tests the termination when the ctx is canceled
@@ -643,12 +643,12 @@ func TestStreamToBatchCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest /
ctx, cancel := context.WithCancel(context.Background())
cache := MakeVerifiedTransactionCache(50)
- inputChan := make(chan InputJob)
+ inputChan := make(chan execpool.InputJob)
resultChan := make(chan *VerificationResult, txBacklogSize)
droppedChan := make(chan *UnverifiedTxnSigJob)
ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan)
require.NoError(t, err)
- sv := MakeStreamToBatch(inputChan, verificationPool, ep)
+ sv := execpool.MakeStreamToBatch(inputChan, verificationPool, ep)
sv.Start(ctx)
var result *VerificationResult
@@ -659,7 +659,7 @@ func TestStreamToBatchCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest /
for {
result = <-resultChan
// at least one ErrShuttingDownError is expected
- if result.Err != ErrShuttingDownError {
+ if result.Err != execpool.ErrShuttingDownError {
continue
}
break
@@ -690,7 +690,7 @@ func TestStreamToBatchCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest /
// cancel the ctx as the sig is not yet sent to the exec pool
// the test might sporadically fail if between sending the txn above
// and the cancelation, 2 x waitForNextTxnDuration elapses (10ms)
- time.Sleep(6 * waitForNextJobDuration)
+	time.Sleep(12 * time.Millisecond)
go func() {
// wait a bit before releasing the tasks, so that the verificationPool ctx first gets canceled
time.Sleep(20 * time.Millisecond)
@@ -703,7 +703,7 @@ func TestStreamToBatchCtxCancelPoolQueue(t *testing.T) { //nolint:paralleltest /
cancel()
wg.Wait()
- require.ErrorIs(t, result.Err, ErrShuttingDownError)
+ require.ErrorIs(t, result.Err, execpool.ErrShuttingDownError)
require.Contains(t, logBuffer.String(), "addBatchToThePoolNow: EnqueueBacklog returned an error and StreamToBatch will stop: context canceled")
}
@@ -725,12 +725,12 @@ func TestStreamToBatchPostVBlocked(t *testing.T) {
txBacklogSizeMod := txBacklogSize / 20
- inputChan := make(chan InputJob)
+ inputChan := make(chan execpool.InputJob)
resultChan := make(chan *VerificationResult, txBacklogSizeMod)
droppedChan := make(chan *UnverifiedTxnSigJob)
ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan)
require.NoError(t, err)
- sv := MakeStreamToBatch(inputChan, verificationPool, ep)
+ sv := execpool.MakeStreamToBatch(inputChan, verificationPool, ep)
defer close(droppedChan)
go func() {
@@ -768,7 +768,7 @@ func TestStreamToBatchPostVBlocked(t *testing.T) {
go processResults(ctx, errChan, resultChan, numOfTxnGroups-overflow, badTxnGroups, &badSigResultCounter, &goodSigResultCounter, &wg)
for err := range errChan {
- require.ErrorIs(t, err, ErrShuttingDownError)
+ require.ErrorIs(t, err, execpool.ErrShuttingDownError)
fmt.Println(badTxnGroups)
}
@@ -789,7 +789,7 @@ func TestStreamToBatchPostVBlocked(t *testing.T) {
}
for err := range errChan {
- require.ErrorIs(t, err, ErrShuttingDownError)
+ require.ErrorIs(t, err, execpool.ErrShuttingDownError)
fmt.Println(badTxnGroups)
}
@@ -818,13 +818,13 @@ func TestStreamToBatchCancelWhenPooled(t *testing.T) {
cache := MakeVerifiedTransactionCache(50)
- inputChan := make(chan InputJob)
+ inputChan := make(chan execpool.InputJob)
resultChan := make(chan *VerificationResult, txBacklogSize)
droppedChan := make(chan *UnverifiedTxnSigJob)
ctx, cancel := context.WithCancel(context.Background())
ep, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{}, cache, resultChan, droppedChan)
require.NoError(t, err)
- sv := MakeStreamToBatch(inputChan, verificationPool, ep)
+ sv := execpool.MakeStreamToBatch(inputChan, verificationPool, ep)
sv.Start(ctx)
errChan := make(chan error)
@@ -849,7 +849,7 @@ func TestStreamToBatchCancelWhenPooled(t *testing.T) {
cancel()
}()
for err := range errChan {
- require.ErrorIs(t, err, ErrShuttingDownError)
+ require.ErrorIs(t, err, execpool.ErrShuttingDownError)
}
wg.Wait()
sv.WaitForStop()
diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go
index caaeaf239..a26b0b099 100644
--- a/data/transactions/verify/txn_test.go
+++ b/data/transactions/verify/txn_test.go
@@ -266,14 +266,11 @@ func TestTxnValidationEmptySig(t *testing.T) {
}
}
-const spProto = protocol.ConsensusVersion("test-state-proof-enabled")
-
-func TestTxnValidationStateProof(t *testing.T) { //nolint:paralleltest // Not parallel because it modifies config.Consensus
+func TestTxnValidationStateProof(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- proto.StateProofInterval = 256
- config.Consensus[spProto] = proto
stxn := transactions.SignedTxn{
Txn: transactions.Transaction{
@@ -292,7 +289,7 @@ func TestTxnValidationStateProof(t *testing.T) { //nolint:paralleltest // Not pa
RewardsPool: poolAddr,
},
UpgradeState: bookkeeping.UpgradeState{
- CurrentProtocol: spProto,
+ CurrentProtocol: protocol.ConsensusCurrentVersion,
},
}
diff --git a/data/txHandler.go b/data/txHandler.go
index 9f1804444..3ad271670 100644
--- a/data/txHandler.go
+++ b/data/txHandler.go
@@ -122,11 +122,10 @@ type TxHandler struct {
net network.GossipNode
msgCache *txSaltedCache
txCanonicalCache *digestCache
- cacheConfig txHandlerConfig
ctx context.Context
ctxCancel context.CancelFunc
- streamVerifier *verify.StreamToBatch
- streamVerifierChan chan verify.InputJob
+ streamVerifier *execpool.StreamToBatch
+ streamVerifierChan chan execpool.InputJob
streamVerifierDropped chan *verify.UnverifiedTxnSigJob
erl *util.ElasticRateLimiter
}
@@ -142,12 +141,6 @@ type TxHandlerOpts struct {
Config config.Local
}
-// txHandlerConfig is a subset of tx handler related options from config.Local
-type txHandlerConfig struct {
- enableFilteringRawMsg bool
- enableFilteringCanonical bool
-}
-
// MakeTxHandler makes a new handler for transaction messages
func MakeTxHandler(opts TxHandlerOpts) (*TxHandler, error) {
@@ -174,13 +167,19 @@ func MakeTxHandler(opts TxHandlerOpts) (*TxHandler, error) {
backlogQueue: make(chan *txBacklogMsg, txBacklogSize),
postVerificationQueue: make(chan *verify.VerificationResult, txBacklogSize),
net: opts.Net,
- msgCache: makeSaltedCache(2 * txBacklogSize),
- txCanonicalCache: makeDigestCache(2 * txBacklogSize),
- cacheConfig: txHandlerConfig{opts.Config.TxFilterRawMsgEnabled(), opts.Config.TxFilterCanonicalEnabled()},
- streamVerifierChan: make(chan verify.InputJob),
+ streamVerifierChan: make(chan execpool.InputJob),
streamVerifierDropped: make(chan *verify.UnverifiedTxnSigJob),
}
+ // use defaultBacklogSize = approx number of txns in a full block as a parameter for the dedup cache size
+ defaultBacklogSize := config.GetDefaultLocal().TxBacklogSize
+ if opts.Config.TxFilterRawMsgEnabled() {
+ handler.msgCache = makeSaltedCache(2 * defaultBacklogSize)
+ }
+ if opts.Config.TxFilterCanonicalEnabled() {
+ handler.txCanonicalCache = makeDigestCache(2 * defaultBacklogSize)
+ }
+
if opts.Config.EnableTxBacklogRateLimiting {
rateLimiter := util.NewElasticRateLimiter(
txBacklogSize,
@@ -191,14 +190,14 @@ func MakeTxHandler(opts TxHandlerOpts) (*TxHandler, error) {
handler.erl = rateLimiter
}
- // prepare the transaction stream verifer
+ // prepare the transaction stream verifier
var err error
txnElementProcessor, err := verify.MakeSigVerifyJobProcessor(handler.ledger, handler.ledger.VerifiedTransactionCache(),
handler.postVerificationQueue, handler.streamVerifierDropped)
if err != nil {
return nil, err
}
- handler.streamVerifier = verify.MakeStreamToBatch(handler.streamVerifierChan, handler.txVerificationPool, txnElementProcessor)
+ handler.streamVerifier = execpool.MakeStreamToBatch(handler.streamVerifierChan, handler.txVerificationPool, txnElementProcessor)
go handler.droppedTxnWatcher()
return handler, nil
}
@@ -219,7 +218,9 @@ func (handler *TxHandler) droppedTxnWatcher() {
// Start enables the processing of incoming messages at the transaction handler
func (handler *TxHandler) Start() {
handler.ctx, handler.ctxCancel = context.WithCancel(context.Background())
- handler.msgCache.Start(handler.ctx, 60*time.Second)
+ if handler.msgCache != nil {
+ handler.msgCache.Start(handler.ctx, 60*time.Second)
+ }
handler.net.RegisterHandlers([]network.TaggedMessageHandler{
{Tag: protocol.TxnTag, MessageHandler: network.HandlerFunc(handler.processIncomingTxn)},
})
@@ -240,7 +241,9 @@ func (handler *TxHandler) Stop() {
}
handler.backlogWg.Wait()
handler.streamVerifier.WaitForStop()
- handler.msgCache.WaitForStop()
+ if handler.msgCache != nil {
+ handler.msgCache.WaitForStop()
+ }
}
func reencode(stxns []transactions.SignedTxn) []byte {
@@ -497,11 +500,11 @@ func (handler *TxHandler) postProcessCheckedTxn(wi *txBacklogMsg) {
}
func (handler *TxHandler) deleteFromCaches(msgKey *crypto.Digest, canonicalKey *crypto.Digest) {
- if handler.cacheConfig.enableFilteringCanonical && canonicalKey != nil {
+ if handler.txCanonicalCache != nil && canonicalKey != nil {
handler.txCanonicalCache.Delete(canonicalKey)
}
- if handler.cacheConfig.enableFilteringRawMsg && msgKey != nil {
+ if handler.msgCache != nil && msgKey != nil {
handler.msgCache.DeleteByKey(msgKey)
}
}
@@ -561,7 +564,7 @@ func (handler *TxHandler) dedupCanonical(ntx int, unverifiedTxGroup []transactio
func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) network.OutgoingMessage {
var msgKey *crypto.Digest
var isDup bool
- if handler.cacheConfig.enableFilteringRawMsg {
+ if handler.msgCache != nil {
// check for duplicate messages
// this helps against relaying duplicates
if msgKey, isDup = handler.msgCache.CheckAndPut(rawmsg.Data); isDup {
@@ -632,7 +635,7 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net
}
var canonicalKey *crypto.Digest
- if handler.cacheConfig.enableFilteringCanonical {
+ if handler.txCanonicalCache != nil {
if canonicalKey, isDup = handler.dedupCanonical(ntx, unverifiedTxGroup, consumed); isDup {
transactionMessagesDupCanonical.Inc(nil)
return network.OutgoingMessage{Action: network.Ignore}
@@ -653,10 +656,10 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net
transactionMessagesDroppedFromBacklog.Inc(nil)
// additionally, remove the txn from duplicate caches to ensure it can be re-submitted
- if canonicalKey != nil {
+ if handler.txCanonicalCache != nil && canonicalKey != nil {
handler.txCanonicalCache.Delete(canonicalKey)
}
- if msgKey != nil {
+ if handler.msgCache != nil && msgKey != nil {
handler.msgCache.DeleteByKey(msgKey)
}
}
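
Since the dedup caches are now allocated only when filtering is enabled, callers control them purely through config. A sketch mirroring the benchmark change below; the remaining TxHandlerOpts fields (ledger, network, pools) are elided and must be supplied in a real call:

	cfg := config.GetDefaultLocal()
	cfg.TxIncomingFilteringFlags = 0 // leaves msgCache and txCanonicalCache nil
	handler, err := MakeTxHandler(TxHandlerOpts{Config: cfg})
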
diff --git a/data/txHandler_test.go b/data/txHandler_test.go
index f0ea2d5b4..f51290a37 100644
--- a/data/txHandler_test.go
+++ b/data/txHandler_test.go
@@ -55,13 +55,19 @@ import (
)
// txHandler uses config values to determine backlog size. Tests should use a static value
-var txBacklogSize = 26000
+var txBacklogSize = config.GetDefaultLocal().TxBacklogSize
// mock sender is used to implement OnClose, since TXHandlers expect to use Senders and ERL Clients
type mockSender struct{}
func (m mockSender) OnClose(func()) {}
+// txHandlerConfig is a subset of tx handler related options from config.Local
+type txHandlerConfig struct {
+ enableFilteringRawMsg bool
+ enableFilteringCanonical bool
+}
+
func makeTestGenesisAccounts(tb testing.TB, numUsers int) ([]basics.Address, []*crypto.SignatureSecrets, map[basics.Address]basics.AccountData) {
addresses := make([]basics.Address, numUsers)
secrets := make([]*crypto.SignatureSecrets, numUsers)
@@ -529,12 +535,25 @@ func BenchmarkTxHandlerIncDeDup(b *testing.B) {
numPoolWorkers := runtime.NumCPU()
dupFactor := test.dupFactor
avgDelay := test.workerDelay / time.Duration(numPoolWorkers)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
- handler := makeTestTxHandlerOrphaned(txBacklogSize)
+ var handler *TxHandler
if test.firstLevelOnly {
- handler.cacheConfig = txHandlerConfig{enableFilteringRawMsg: true, enableFilteringCanonical: false}
+ handler = makeTestTxHandlerOrphanedWithContext(
+ ctx, txBacklogSize, txBacklogSize,
+ txHandlerConfig{enableFilteringRawMsg: true, enableFilteringCanonical: false}, 0,
+ )
} else if !test.dedup {
- handler.cacheConfig = txHandlerConfig{}
+ handler = makeTestTxHandlerOrphanedWithContext(
+ ctx, txBacklogSize, 0,
+ txHandlerConfig{}, 0,
+ )
+ } else {
+ handler = makeTestTxHandlerOrphanedWithContext(
+ ctx, txBacklogSize, txBacklogSize,
+ txHandlerConfig{enableFilteringRawMsg: true, enableFilteringCanonical: true}, 0,
+ )
}
// prepare tx groups
@@ -783,12 +802,17 @@ func makeTestTxHandlerOrphanedWithContext(ctx context.Context, backlogSize int,
cacheSize = txBacklogSize
}
handler := &TxHandler{
- backlogQueue: make(chan *txBacklogMsg, backlogSize),
- msgCache: makeSaltedCache(cacheSize),
- txCanonicalCache: makeDigestCache(cacheSize),
- cacheConfig: txHandlerConfig,
+ backlogQueue: make(chan *txBacklogMsg, backlogSize),
+ }
+
+ if txHandlerConfig.enableFilteringRawMsg {
+ handler.msgCache = makeSaltedCache(cacheSize)
+ handler.msgCache.Start(ctx, refreshInterval)
}
- handler.msgCache.Start(ctx, refreshInterval)
+ if txHandlerConfig.enableFilteringCanonical {
+ handler.txCanonicalCache = makeDigestCache(cacheSize)
+ }
+
return handler
}
@@ -892,6 +916,8 @@ func TestTxHandlerProcessIncomingCacheRotation(t *testing.T) {
t.Run("scheduled", func(t *testing.T) {
// double enqueue a single txn message, ensure it is discarded
ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+
handler := makeTestTxHandlerOrphanedWithContext(ctx, txBacklogSize, txBacklogSize, txHandlerConfig{true, true}, 10*time.Millisecond)
var action network.OutgoingMessage
@@ -907,12 +933,15 @@ func TestTxHandlerProcessIncomingCacheRotation(t *testing.T) {
msg = <-handler.backlogQueue
require.Equal(t, 1, len(msg.unverifiedTxGroup))
require.Equal(t, stxns1[0], msg.unverifiedTxGroup[0])
- cancelFunc()
})
t.Run("manual", func(t *testing.T) {
// double enqueue a single txn message, ensure it is discarded
- handler := makeTestTxHandlerOrphaned(txBacklogSize)
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+
+ handler := makeTestTxHandlerOrphanedWithContext(ctx, txBacklogSize, txBacklogSize, txHandlerConfig{true, true}, 10*time.Millisecond)
+
var action network.OutgoingMessage
var msg *txBacklogMsg
@@ -1684,17 +1713,16 @@ func runHandlerBenchmarkWithBacklog(b *testing.B, txGen txGenIf, tps int, useBac
cfg.IncomingConnectionsLimit = 10
ledger := txGen.makeLedger(b, cfg, log, fmt.Sprintf("%s-%d", b.Name(), b.N))
defer ledger.Close()
- handler, err := makeTestTxHandler(ledger, cfg)
- require.NoError(b, err)
- defer handler.txVerificationPool.Shutdown()
- defer close(handler.streamVerifierDropped)
// The benchmark generates only 1000 txns, and reuses them. This is done for faster benchmark time and the
// ability to have long runs without being limited to the memory. The dedup will block the txns once the same
// ones are rotated again. If the purpose is to test dedup, then this can be changed by setting
// genTCount = b.N
- handler.cacheConfig.enableFilteringRawMsg = false
- handler.cacheConfig.enableFilteringCanonical = false
+ cfg.TxIncomingFilteringFlags = 0
+ handler, err := makeTestTxHandler(ledger, cfg)
+ require.NoError(b, err)
+ defer handler.txVerificationPool.Shutdown()
+ defer close(handler.streamVerifierDropped)
// since Start is not called, set the context here
handler.ctx, handler.ctxCancel = context.WithCancel(context.Background())
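Note the benchmark now disables dedup via `cfg.TxIncomingFilteringFlags = 0` before constructing the handler, instead of mutating handler fields afterwards. A hedged sketch of how such a bit-flags field could map onto the two enables; the actual bit assignments in `config.Local` may differ:

```go
// Hypothetical mapping from a bit-flags config field to the two dedup enables.
package main

import "fmt"

const (
	filterRawMsg    uint32 = 1 << 0 // assumed bit for raw-message dedup
	filterCanonical uint32 = 1 << 1 // assumed bit for canonical-txgroup dedup
)

type txHandlerConfig struct {
	enableFilteringRawMsg    bool
	enableFilteringCanonical bool
}

func fromFlags(flags uint32) txHandlerConfig {
	return txHandlerConfig{
		enableFilteringRawMsg:    flags&filterRawMsg != 0,
		enableFilteringCanonical: flags&filterCanonical != 0,
	}
}

func main() {
	fmt.Printf("%+v\n", fromFlags(0)) // all filtering off, as in the benchmark
	fmt.Printf("%+v\n", fromFlags(filterRawMsg|filterCanonical))
}
```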
diff --git a/data/txntest/txn.go b/data/txntest/txn.go
index 8e9f699f8..5815aacc6 100644
--- a/data/txntest/txn.go
+++ b/data/txntest/txn.go
@@ -182,8 +182,7 @@ func assemble(source interface{}) []byte {
}
ops, err := logic.AssembleString(program)
if err != nil {
- fmt.Printf("Bad program %v", ops.Errors)
- panic(ops.Errors)
+ panic(fmt.Sprintf("Bad program %v", ops.Errors))
}
return ops.Program
case []byte:
@@ -256,8 +255,8 @@ func (tx Txn) Txn() transactions.Transaction {
OnCompletion: tx.OnCompletion,
ApplicationArgs: tx.ApplicationArgs,
Accounts: tx.Accounts,
- ForeignApps: tx.ForeignApps,
- ForeignAssets: tx.ForeignAssets,
+ ForeignApps: append([]basics.AppIndex(nil), tx.ForeignApps...),
+ ForeignAssets: append([]basics.AssetIndex(nil), tx.ForeignAssets...),
Boxes: tx.Boxes,
LocalStateSchema: tx.LocalStateSchema,
GlobalStateSchema: tx.GlobalStateSchema,
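The `txntest` change above replaces direct slice assignment with `append([]T(nil), s...)`, which copies the template's slices so the built transaction does not alias them. A tiny stand-alone illustration of the aliasing issue:

```go
// Demonstrates why the copy matters: a plain assignment shares the backing
// array, so later mutation of the template would leak into the transaction.
package main

import "fmt"

func main() {
	template := []uint64{1, 2, 3}

	aliased := template                          // shares backing array
	copied := append([]uint64(nil), template...) // independent copy

	template[0] = 99
	fmt.Println(aliased) // [99 2 3] — sees the mutation
	fmt.Println(copied)  // [1 2 3]  — unaffected
}
```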
diff --git a/docker/README.md b/docker/README.md
index 5db6b20a1..a97e3296d 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -6,15 +6,29 @@ General purpose algod container image.
## Image Configuration
-There are a number of special files and environment variables used to control how a container is started.
+Algorand maintains a Docker image with recent snapshot builds from our `master` branch on DockerHub to support users who prefer to run containerized processes. There are several images available for running the latest stable or development versions of Algod.
+
+- `algorand/algod:latest` is the latest stable release version of Algod (default)
+- `algorand/algod:stable` is the latest stable version of Algod
+- `algorand/algod:{version}-stable` is the stable version of Algod at a specific version number
+- `algorand/algod:beta` is the version of Algod being considered for the next stable release
+- `algorand/algod:nightly` is the latest development version of Algod
+
+Algorand also publishes experimental versions of Algod.
+
+- `algorand/algod:{LONGSHA}` is a version containing a specific commit to `master`
+- `algorand/algod:master` is the version running on our `master` branch
+- `algorand/algod:feature-{branch}` is the latest version of Algod on any of the go-algorand feature branches
+
+Furthermore, there are a number of special files and environment variables used to control how a container is started. See below for more detail.
### Default Configuration
-By default the following config.json overrides are applied:
+The following config.json overrides are applied:
-| Setting | Value |
-| ------- | ----- |
-| EndpointAddress | 0.0.0.0:8080 |
+| Setting | Value | Description |
+| ------- | ----- | ----------- |
+| EndpointAddress | 0.0.0.0:8080 | Ensure the API is accessible from outside of the container. |
### Environment Variables
@@ -22,15 +36,17 @@ The following environment variables can be supplied. Except when noted, it is po
| Variable | Description |
| -------- | ----------- |
-| NETWORK | Leave blank for a private network, otherwise specify one of mainnet, betanet, testnet, or devnet. Only used during a data directory initialization. |
-| FAST_CATCHUP | If set to 1 on a public network, attempt to start fast-catchup during initial config. |
-| TELEMETRY_NAME| If set on a public network, telemetry is reported with this name. |
-| DEV_MODE | If set to 1 on a private network, enable dev mode. Only used during data directory initialization. |
-| NUM_ROUNDS | If set on a private network, override default of 30000 participation keys. |
-| TOKEN | If set, overrides the REST API token. |
-| ADMIN_TOKEN | If set, overrides the REST API admin token. |
-| KMD_TOKEN | If set along with `START_KMD`, override the KMD REST API token. |
-| START_KMD | When set to 1, start kmd service with no timeout. THIS SHOULD NOT BE USED IN PRODUCTION. |
+| NETWORK | Leave blank for a private network, otherwise specify one of mainnet, betanet, testnet, or devnet. Only used during a data directory initialization. |
+| PROFILE | If set, initializes the config.json file according to the given profile. |
+| DEV_MODE | If set to 1 on a private network, enable dev mode. Only used during data directory initialization. |
+| START_KMD | When set to 1, start kmd service with no timeout. THIS SHOULD NOT BE USED IN PRODUCTION. |
+| FAST_CATCHUP | If set to 1 on a public network, attempt to start fast-catchup during initial config. |
+| TOKEN | If set, overrides the REST API token. |
+| ADMIN_TOKEN | If set, overrides the REST API admin token. |
+| KMD_TOKEN | If set along with `START_KMD`, override the KMD REST API token. |
+| TELEMETRY_NAME | If set on a public network, telemetry is reported with this name. |
+| NUM_ROUNDS | If set on a private network, override the default of 30000 rounds for participation keys. |
+| PEER_ADDRESS | If set, override the phonebook with a peer ip:port (or a semicolon-separated list: ip:port;ip:port;ip:port...) |
### Special Files
@@ -42,8 +58,7 @@ Configuration can be modified by specifying certain files. These can be changed
| /etc/algorand/algod.token | Override default randomized REST API token. |
| /etc/algorand/algod.admin.token | Override default randomized REST API admin token. |
| /etc/algorand/logging.config | Use a custom [logging.config](https://developer.algorand.org/docs/run-a-node/reference/telemetry-config/#configuration) file for configuring telemetry. |
-
-TODO: `/etc/algorand/template.json` for overriding the private network topology.
+| /etc/algorand/template.json | Override default private network topology. One of the nodes in the template must be named "data". |
## Example Configuration
@@ -80,7 +95,7 @@ The data directory located at `/algod/data`. Mounting a volume at that location
### Volume Permissions
-The container executes in the context of the `algorand` user with it's own UID and GID which is handled differently depending on your operating system. Here are a few options for how to work with this environment:
+The container executes in the context of the `algorand` user with UID=999 and GID=999, which is handled differently depending on your operating system or deployment platform. During startup the container temporarily runs as `root` in order to modify the permissions of `/algod/data`. It then changes to the `algorand` user. This can sometimes cause problems, for example, if your deployment platform doesn't allow containers to run as the root user.
#### Named Volume
@@ -91,23 +106,9 @@ docker volume create algod-data
docker run -it --rm -d -v algod-data:/algod/data algorand/algod
```
-#### Local Directory without SELinux
-
-Explicitly set the UID and GID of the container:
-
-```bash
-docker run -it --rm -d -v /srv/data:/algod/data -u $UID:$GID algorand/algod
-```
-
-#### Local Directory with SELinux
-
-Set the UID and GID of the container while add the `Z` option to the volume definition:
-
-```bash
-docker run -it --rm -d -v /srv/data:/algod/data:Z -u $UID:$GID algorand/algod
-```
+#### Use specific UID and GID
-> See the documentation on [configuring the selinux label](https://docs.docker.com/storage/bind-mounts/#configure-the-selinux-label).
+On the host system, ensure the directory being mounted uses UID=999 and GID=999. If the directory already has these permissions, you may override the default user with `-u 999:999`.
### Private Network
diff --git a/docker/files/run/devmode_template.json b/docker/files/run/devmode_template.json
index 8e756502b..c48a6bc1c 100644
--- a/docker/files/run/devmode_template.json
+++ b/docker/files/run/devmode_template.json
@@ -21,7 +21,8 @@
"Online": true
}
],
- "DevMode": true
+ "DevMode": true,
+ "RewardsPoolBalance": 0
},
"Nodes": [
{
diff --git a/docker/files/run/followermode_template.json b/docker/files/run/followermode_template.json
new file mode 100644
index 000000000..ed074587e
--- /dev/null
+++ b/docker/files/run/followermode_template.json
@@ -0,0 +1,51 @@
+{
+ "Genesis": {
+ "ConsensusProtocol": "future",
+ "NetworkName": "followermodenet",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": NUM_ROUNDS,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 40,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 40,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 20,
+ "Online": true
+ }
+ ],
+ "DevMode": true
+ },
+ "Nodes": [
+ {
+ "Name": "data",
+ "IsRelay": true,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ },
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ }
+ ]
+ },
+ {
+ "Name": "follower",
+ "IsRelay": false,
+ "ConfigJSONOverride": "{\"EnableFollowMode\":true,\"EndpointAddress\":\"0.0.0.0:8081\"}"
+ }
+ ]
+}
diff --git a/docker/files/run/run.sh b/docker/files/run/run.sh
index bd4e79fe0..f6b12798f 100755
--- a/docker/files/run/run.sh
+++ b/docker/files/run/run.sh
@@ -6,9 +6,17 @@ if [ "$DEBUG" = "1" ]; then
set -x
fi
+# To allow mounting the data directory we need to change permissions
+# to our algorand user. The script is initially run as the root user
+# in order to change permissions; afterwards the script is re-launched
+# as the algorand user.
+if [ "$(id -u)" = '0' ]; then
+ chown -R algorand:algorand $ALGORAND_DATA
+ exec runuser -u algorand "$BASH_SOURCE"
+fi
+
# Script to configure or resume a network. Based on environment settings the
# node will be setup with a private network or connect to a public network.
-
####################
# Helper functions #
####################
@@ -35,13 +43,17 @@ function start_public_network() {
catchup &
fi
- # redirect output to stdout
- algod -o
+ if [ "$PEER_ADDRESS" != "" ]; then
+ printf '%s\n' "$PEER_ADDRESS"
+ algod -o -p $PEER_ADDRESS
+ else
+ # redirect output to stdout
+ algod -o
+ fi
}
function configure_data_dir() {
cd "$ALGORAND_DATA"
- algocfg -d . set -p EndpointAddress -v "0.0.0.0:${ALGOD_PORT}"
# check for config file overrides.
if [ -f "/etc/algorand/config.json" ]; then
@@ -57,14 +69,24 @@ function configure_data_dir() {
cp /etc/algorand/logging.config logging.config
fi
- # check for token overrides
- if [ "$TOKEN" != "" ]; then
- echo "$TOKEN" >algod.token
- fi
- if [ "$ADMIN_TOKEN" != "" ]; then
- echo "$ADMIN_TOKEN" >algod.admin.token
+ # initialize config with profile.
+ if [ "$PROFILE" != "" ]; then
+ algocfg profile set --yes -d "$ALGORAND_DATA" "$PROFILE"
fi
+ # call after copying config.json to make sure the port is exposed.
+ algocfg -d . set -p EndpointAddress -v "0.0.0.0:${ALGOD_PORT}"
+
+ # check for token overrides
+ for dir in ${ALGORAND_DATA}/../*/; do
+ if [ "$TOKEN" != "" ]; then
+ echo "$TOKEN" > "$dir/algod.token"
+ fi
+ if [ "$ADMIN_TOKEN" != "" ]; then
+ echo "$ADMIN_TOKEN" > "$dir/algod.admin.token"
+ fi
+ done
+
# configure telemetry
if [ "$TELEMETRY_NAME" != "" ]; then
diagcfg telemetry name -n "$TELEMETRY_NAME" -d "$ALGORAND_DATA"
@@ -142,8 +164,12 @@ function start_private_network() {
function start_new_private_network() {
local TEMPLATE="template.json"
- if [ "$DEV_MODE" = "1" ]; then
- TEMPLATE="devmode_template.json"
+ if [ -f "/etc/algorand/template.json" ]; then
+ cp /etc/algorand/template.json "/node/run/$TEMPLATE"
+ else
+ if [ "$DEV_MODE" = "1" ]; then
+ TEMPLATE="devmode_template.json"
+ fi
fi
sed -i "s/NUM_ROUNDS/${NUM_ROUNDS:-30000}/" "/node/run/$TEMPLATE"
goal network create --noclean -n dockernet -r "${ALGORAND_DATA}/.." -t "/node/run/$TEMPLATE"
@@ -158,13 +184,17 @@ function start_new_private_network() {
echo "Starting Algod Docker Container"
echo " ALGORAND_DATA: $ALGORAND_DATA"
echo " NETWORK: $NETWORK"
-echo " ALGOD_PORT: $ALGOD_PORT"
-echo " FAST_CATCHUP: $FAST_CATCHUP"
+echo " PROFILE: $PROFILE"
echo " DEV_MODE: $DEV_MODE"
+echo " START_KMD: ${START_KMD:-"Not Set"}"
+echo " FAST_CATCHUP: $FAST_CATCHUP"
echo " TOKEN: ${TOKEN:-"Not Set"}"
+echo " ADMIN_TOKEN: ${ADMIN_TOKEN:-"Not Set"}"
echo " KMD_TOKEN: ${KMD_TOKEN:-"Not Set"}"
echo " TELEMETRY_NAME: $TELEMETRY_NAME"
-echo " START_KMD: ${START_KMD:-"Not Set"}"
+echo " NUM_ROUNDS: $NUM_ROUNDS"
+echo " PEER_ADDRESS: $PEER_ADDRESS"
+echo " ALGOD_PORT: $ALGOD_PORT"
# If data directory is initialized, start existing environment.
if [ -f "$ALGORAND_DATA/../network.json" ]; then
diff --git a/docs/agreement_service.md b/docs/agreement_service.md
index 28d760054..7d343d8df 100644
--- a/docs/agreement_service.md
+++ b/docs/agreement_service.md
@@ -4,10 +4,6 @@ The Algorand Agreement Service manages the consensus protocol and is composed of
The agreement service learns what blocks the community has reached consensus on and writes them to the ledger. When your account is called upon to propose a block or serve on a committee, the agreement service handles that too. Other parts of the `agreement` package deal with related issues -- asking the network for blocks we're missing (and helping peers who are missing blocks), generally figuring out how to handle incoming messages from the network, etc. Most of the complicated logic lives in this package (along with most of the subtle concurrency issues). Reading the Algorand SOSP paper before diving into this package will be super helpful for understanding what's going on.
-A few subpackages are maybe not used anymore but I'm not confident enough to delete them myself (Adam, 2018-02-28):
- - `debug` contains a couple of python scripts for visualizing / extracting useful info from the logfiles produced by the agreement service. These might still be useful once we have an end-to-end test running again. (It also contains obsolete logging functions that have been replaced by `logging`)
- - `util` as a top-level package contained a few utility functions that I'm pretty sure have migrated into the relevant subpackages
-
## Dispatcher
The dispatcher provides an interface between the Agreement Service components and the Algorand Network. Incoming messages are distributed between the Vote Manager and Proposal Manager. Outgoing Votes and Proposals generated by the Consensus State Machine are passed through the Dispatcher.
diff --git a/go.mod b/go.mod
index a63e13e5c..f490570eb 100644
--- a/go.mod
+++ b/go.mod
@@ -6,13 +6,13 @@ require (
github.com/DataDog/zstd v1.5.2
github.com/algorand/avm-abi v0.2.0
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414
- github.com/algorand/go-codec/codec v1.1.8
+ github.com/algorand/go-codec/codec v1.1.9
github.com/algorand/go-deadlock v0.2.2
github.com/algorand/go-sumhash v0.1.0
github.com/algorand/graphtrace v0.1.0
github.com/algorand/msgp v1.1.53
github.com/algorand/oapi-codegen v1.12.0-algorand.0
- github.com/algorand/websocket v1.4.5
+ github.com/algorand/websocket v1.4.6
github.com/aws/aws-sdk-go v1.33.0
github.com/consensys/gnark-crypto v0.7.0
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018
@@ -33,8 +33,8 @@ require (
github.com/spf13/cobra v0.0.3
github.com/stretchr/testify v1.8.1
golang.org/x/crypto v0.1.0
- golang.org/x/sys v0.1.0
- golang.org/x/text v0.4.0
+ golang.org/x/sys v0.7.0
+ golang.org/x/text v0.9.0
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009
)
@@ -66,8 +66,8 @@ require (
github.com/stretchr/objx v0.5.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.1 // indirect
- golang.org/x/net v0.1.0 // indirect
- golang.org/x/term v0.1.0 // indirect
+ golang.org/x/net v0.9.0 // indirect
+ golang.org/x/term v0.7.0 // indirect
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index 0bf38663b..972819621 100644
--- a/go.sum
+++ b/go.sum
@@ -5,9 +5,8 @@ github.com/algorand/avm-abi v0.2.0 h1:bkjsG+BOEcxUcnGSALLosmltE0JZdg+ZisXKx0UDX2
github.com/algorand/avm-abi v0.2.0/go.mod h1:+CgwM46dithy850bpTeHh9MC99zpn2Snirb3QTl2O/g=
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414 h1:nwYN+GQ7Z5OOfZwqBO1ma7DSlP7S1YrKWICOyjkwqrc=
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
-github.com/algorand/go-codec v1.1.8/go.mod h1:XhzVs6VVyWMLu6cApb9/192gBjGRVGm5cX5j203Heg4=
-github.com/algorand/go-codec/codec v1.1.8 h1:lsFuhcOH2LiEhpBH3BVUUkdevVmwCRyvb7FCAAPeY6U=
-github.com/algorand/go-codec/codec v1.1.8/go.mod h1:tQ3zAJ6ijTps6V+wp8KsGDnPC2uhHVC7ANyrtkIY0bA=
+github.com/algorand/go-codec/codec v1.1.9 h1:el4HFSPZhP+YCgOZxeFGB/BqlNkaUIs55xcALulUTCM=
+github.com/algorand/go-codec/codec v1.1.9/go.mod h1:YkEx5nmr/zuCeaDYOIhlDg92Lxju8tj2d2NrYqP7g7k=
github.com/algorand/go-deadlock v0.2.2 h1:L7AKATSUCzoeVuOgpTipfCEjdUu5ECmlje8R7lP9DOY=
github.com/algorand/go-deadlock v0.2.2/go.mod h1:Hat1OXKqKNUcN/iv74FjGhF4hsOE2l7gOgQ9ZVIq6Fk=
github.com/algorand/go-sumhash v0.1.0 h1:b/QRhyLuF//vOcicBIxBXYW8bERNoeLxieht/dUYpVg=
@@ -18,8 +17,8 @@ github.com/algorand/msgp v1.1.53 h1:D6HKLyvLE6ltfsf8Apsrc+kqYb/CcOZEAfh1DpkPrNg=
github.com/algorand/msgp v1.1.53/go.mod h1:5K3d58/poT5fPmtiwuQft6GjgSrVEM46KoXdLrID8ZU=
github.com/algorand/oapi-codegen v1.12.0-algorand.0 h1:W9PvED+wAJc+9EeXPONnA+0zE9UhynEqoDs4OgAxKhk=
github.com/algorand/oapi-codegen v1.12.0-algorand.0/go.mod h1:tIWJ9K/qrLDVDt5A1p82UmxZIEGxv2X+uoujdhEAL48=
-github.com/algorand/websocket v1.4.5 h1:Cs6UTaCReAl02evYxmN8k57cNHmBILRcspfSxYg4AJE=
-github.com/algorand/websocket v1.4.5/go.mod h1:79n6FSZY08yQagHzE/YWZqTPBYfY5wc3IS+UTZe1W5c=
+github.com/algorand/websocket v1.4.6 h1:I0kV4EYwatuUrKtNiwzYYgojgwh6pksDmlqntKG2Woc=
+github.com/algorand/websocket v1.4.6/go.mod h1:HJmdGzFtnlUQ4nTzZP6WrT29oGYf1t6Ybi64vROcT+M=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/aws/aws-sdk-go v1.33.0 h1:Bq5Y6VTLbfnJp1IV8EL/qUU5qO1DYHda/zis/sqevkY=
@@ -154,8 +153,8 @@ golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
-golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -170,17 +169,17 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
-golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/installer/config.json.example b/installer/config.json.example
index 3eefcf69e..4ae34a571 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -54,6 +54,7 @@
"EnableRuntimeMetrics": false,
"EnableTopAccountsReporting": false,
"EnableTxBacklogRateLimiting": false,
+ "EnableTxnEvalTracer": false,
"EnableUsageLog": false,
"EnableVerbosedTransactionSyncLogging": false,
"EndpointAddress": "127.0.0.1:0",
diff --git a/ledger/acctdeltas.go b/ledger/acctdeltas.go
index fcc937e01..e587a8fcf 100644
--- a/ledger/acctdeltas.go
+++ b/ledger/acctdeltas.go
@@ -759,13 +759,15 @@ func accountsNewRoundImpl(
// if we didn't have it before, and we don't have anything now, just skip it.
} else {
// create a new entry.
+ var ref trackerdb.AccountRef
normBalance := data.newAcct.NormalizedOnlineBalance(proto)
- ref, err := writer.InsertAccount(data.address, normBalance, data.newAcct)
- if err == nil {
- updatedAccounts[updatedAccountIdx].Ref = ref
- updatedAccounts[updatedAccountIdx].AccountData = data.newAcct
- newAddressesRowIDs[data.address] = ref
+ ref, err = writer.InsertAccount(data.address, normBalance, data.newAcct)
+ if err != nil {
+ return nil, nil, nil, err
}
+ updatedAccounts[updatedAccountIdx].Ref = ref
+ updatedAccounts[updatedAccountIdx].AccountData = data.newAcct
+ newAddressesRowIDs[data.address] = ref
}
} else {
// non-zero rowid means we had a previous value.
@@ -773,33 +775,33 @@ func accountsNewRoundImpl(
// new value is zero, which means we need to delete the current value.
var rowsAffected int64
rowsAffected, err = writer.DeleteAccount(data.oldAcct.Ref)
- if err == nil {
- // we deleted the entry successfully.
- updatedAccounts[updatedAccountIdx].Ref = nil
- updatedAccounts[updatedAccountIdx].AccountData = trackerdb.BaseAccountData{}
- if rowsAffected != 1 {
- err = fmt.Errorf("failed to delete accountbase row for account %v, rowid %d", data.address, data.oldAcct.Ref)
- }
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ // we deleted the entry successfully.
+ updatedAccounts[updatedAccountIdx].Ref = nil
+ updatedAccounts[updatedAccountIdx].AccountData = trackerdb.BaseAccountData{}
+ if rowsAffected != 1 {
+ err = fmt.Errorf("failed to delete accountbase row for account %v, rowid %d", data.address, data.oldAcct.Ref)
+ return nil, nil, nil, err
}
} else {
var rowsAffected int64
normBalance := data.newAcct.NormalizedOnlineBalance(proto)
rowsAffected, err = writer.UpdateAccount(data.oldAcct.Ref, normBalance, data.newAcct)
- if err == nil {
- // rowid doesn't change on update.
- updatedAccounts[updatedAccountIdx].Ref = data.oldAcct.Ref
- updatedAccounts[updatedAccountIdx].AccountData = data.newAcct
- if rowsAffected != 1 {
- err = fmt.Errorf("failed to update accountbase row for account %v, rowid %d", data.address, data.oldAcct.Ref)
- }
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ // rowid doesn't change on update.
+ updatedAccounts[updatedAccountIdx].Ref = data.oldAcct.Ref
+ updatedAccounts[updatedAccountIdx].AccountData = data.newAcct
+ if rowsAffected != 1 {
+ err = fmt.Errorf("failed to update accountbase row for account %v, rowid %d", data.address, data.oldAcct.Ref)
+ return nil, nil, nil, err
}
}
}
- if err != nil {
- return
- }
-
// set the returned persisted account states so that we could store that as the baseAccounts in commitRound
updatedAccounts[updatedAccountIdx].Round = lastUpdateRound
updatedAccounts[updatedAccountIdx].Addr = data.address
@@ -850,7 +852,7 @@ func accountsNewRoundImpl(
acctRef = newAddressesRowIDs[addr]
if acctRef == nil && !inMemEntry {
err = fmt.Errorf("cannot resolve address %s (%d), aidx %d, data %v", addr.String(), acctRef, aidx, data.newResource)
- return
+ return nil, nil, nil, err
}
}
var entry trackerdb.PersistedResourcesData
@@ -867,7 +869,7 @@ func accountsNewRoundImpl(
// create a new entry.
if !data.newResource.IsApp() && !data.newResource.IsAsset() {
err = fmt.Errorf("unknown creatable for addr %v (%d), aidx %d, data %v", addr, acctRef, aidx, data.newResource)
- return
+ return nil, nil, nil, err
}
// check if we need to "upgrade" this insert operation into an update operation due to a scheduled
// delete operation of the same resource.
@@ -877,19 +879,22 @@ func accountsNewRoundImpl(
delete(pendingResourcesDeletion, resourceKey{acctRef: acctRef, aidx: aidx})
var rowsAffected int64
rowsAffected, err = writer.UpdateResource(acctRef, aidx, data.newResource)
- if err == nil {
- // rowid doesn't change on update.
- entry = trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound}
- if rowsAffected != 1 {
- err = fmt.Errorf("failed to update resources row for addr %s (%d), aidx %d", addr, acctRef, aidx)
- }
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ // rowid doesn't change on update.
+ entry = trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound}
+ if rowsAffected != 1 {
+ err = fmt.Errorf("failed to update resources row for addr %s (%d), aidx %d", addr, acctRef, aidx)
+ return nil, nil, nil, err
}
} else {
_, err = writer.InsertResource(acctRef, aidx, data.newResource)
- if err == nil {
- // set the returned persisted account states so that we could store that as the baseResources in commitRound
- entry = trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound}
+ if err != nil {
+ return nil, nil, nil, err
}
+ // set the returned persisted account states so that we could store that as the baseResources in commitRound
+ entry = trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound}
}
}
} else {
@@ -901,24 +906,22 @@ func accountsNewRoundImpl(
} else {
if !data.newResource.IsApp() && !data.newResource.IsAsset() {
err = fmt.Errorf("unknown creatable for addr %v (%d), aidx %d, data %v", addr, acctRef, aidx, data.newResource)
- return
+ return nil, nil, nil, err
}
var rowsAffected int64
rowsAffected, err = writer.UpdateResource(acctRef, aidx, data.newResource)
- if err == nil {
- // rowid doesn't change on update.
- entry = trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound}
- if rowsAffected != 1 {
- err = fmt.Errorf("failed to update resources row for addr %s (%d), aidx %d", addr, acctRef, aidx)
- }
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ // rowid doesn't change on update.
+ entry = trackerdb.PersistedResourcesData{AcctRef: acctRef, Aidx: aidx, Data: data.newResource, Round: lastUpdateRound}
+ if rowsAffected != 1 {
+ err = fmt.Errorf("failed to update resources row for addr %s (%d), aidx %d", addr, acctRef, aidx)
+ return nil, nil, nil, err
}
}
}
- if err != nil {
- return
- }
-
deltas := updatedResources[addr]
deltas = append(deltas, entry)
updatedResources[addr] = deltas
@@ -929,16 +932,15 @@ func accountsNewRoundImpl(
// new value is zero, which means we need to delete the current value.
var rowsAffected int64
rowsAffected, err = writer.DeleteResource(delRes.acctRef, delRes.aidx)
- if err == nil {
- // we deleted the entry successfully.
- // set zero addrid to mark this entry invalid for subsequent addr to addrid resolution
- // because the base account might gone.
- if rowsAffected != 1 {
- err = fmt.Errorf("failed to delete resources row (%d), aidx %d", delRes.acctRef, delRes.aidx)
- }
- }
if err != nil {
- return
+ return nil, nil, nil, err
+ }
+ // we deleted the entry successfully.
+ // set a zero addrid to mark this entry invalid for subsequent addr-to-addrid resolution,
+ // because the base account might be gone.
+ if rowsAffected != 1 {
+ err = fmt.Errorf("failed to delete resources row (%d), aidx %d", delRes.acctRef, delRes.aidx)
+ return nil, nil, nil, err
}
}
@@ -950,17 +952,20 @@ func accountsNewRoundImpl(
continue // changed back within the delta span
}
err = writer.UpsertKvPair(key, mv.data)
+ if err != nil {
+ return nil, nil, nil, err
+ }
updatedKVs[key] = trackerdb.PersistedKVData{Value: mv.data, Round: lastUpdateRound}
} else {
if mv.oldData == nil { // Came and went within the delta span
continue
}
err = writer.DeleteKvPair(key)
+ if err != nil {
+ return nil, nil, nil, err
+ }
updatedKVs[key] = trackerdb.PersistedKVData{Value: nil, Round: lastUpdateRound}
}
- if err != nil {
- return
- }
}
for cidx, cdelta := range creatables {
@@ -970,7 +975,7 @@ func accountsNewRoundImpl(
_, err = writer.DeleteCreatable(cidx, cdelta.Ctype)
}
if err != nil {
- return
+ return nil, nil, nil, err
}
}
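The `acctdeltas.go` rewrite above converts every `if err == nil { ... }` ladder, plus the trailing `if err != nil { return }`, into an immediate `return nil, nil, nil, err` at each failure point. A simplified before/after sketch of the two control-flow shapes (stand-in types, not the real writer API):

```go
// Contrasts nested error handling with early returns: the nested form relies
// on a single trailing err check and is easy to fall through with stale state.
package main

import (
	"errors"
	"fmt"
)

var errWrite = errors.New("write failed")

func write(fail bool) (int64, error) {
	if fail {
		return 0, errWrite
	}
	return 1, nil
}

// before: success path nested under err == nil; caller relies on a later check.
func updateNested(fail bool) error {
	rows, err := write(fail)
	if err == nil {
		if rows != 1 {
			err = fmt.Errorf("unexpected rows: %d", rows)
		}
	}
	return err
}

// after: early return at every failure point, no deferred err bookkeeping.
func updateEarlyReturn(fail bool) error {
	rows, err := write(fail)
	if err != nil {
		return err
	}
	if rows != 1 {
		return fmt.Errorf("unexpected rows: %d", rows)
	}
	return nil
}

func main() {
	fmt.Println(updateNested(true), updateEarlyReturn(true))
	fmt.Println(updateNested(false), updateEarlyReturn(false))
}
```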
@@ -998,25 +1003,27 @@ func onlineAccountsNewRoundImpl(
if newStatus == basics.Online {
if newAcct.IsVotingEmpty() {
err = fmt.Errorf("empty voting data for online account %s: %v", data.address.String(), newAcct)
- } else {
- // create a new entry.
- var ref trackerdb.OnlineAccountRef
- normBalance := newAcct.NormalizedOnlineBalance(proto)
- ref, err = writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid))
- if err == nil {
- updated := trackerdb.PersistedOnlineAccountData{
- Addr: data.address,
- AccountData: newAcct,
- Round: lastUpdateRound,
- Ref: ref,
- UpdRound: basics.Round(updRound),
- }
- updatedAccounts = append(updatedAccounts, updated)
- prevAcct = updated
- }
+ return nil, err
}
+ // create a new entry.
+ var ref trackerdb.OnlineAccountRef
+ normBalance := newAcct.NormalizedOnlineBalance(proto)
+ ref, err = writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid))
+ if err != nil {
+ return nil, err
+ }
+ updated := trackerdb.PersistedOnlineAccountData{
+ Addr: data.address,
+ AccountData: newAcct,
+ Round: lastUpdateRound,
+ Ref: ref,
+ UpdRound: basics.Round(updRound),
+ }
+ updatedAccounts = append(updatedAccounts, updated)
+ prevAcct = updated
} else if !newAcct.IsVotingEmpty() {
err = fmt.Errorf("non-empty voting data for non-online account %s: %v", data.address.String(), newAcct)
+ return nil, err
}
}
} else {
@@ -1025,46 +1032,44 @@ func onlineAccountsNewRoundImpl(
// new value is zero then go offline
if newStatus == basics.Online {
err = fmt.Errorf("empty voting data but online account %s: %v", data.address.String(), newAcct)
- } else {
- var ref trackerdb.OnlineAccountRef
- ref, err = writer.InsertOnlineAccount(data.address, 0, trackerdb.BaseOnlineAccountData{}, updRound, 0)
- if err == nil {
- updated := trackerdb.PersistedOnlineAccountData{
- Addr: data.address,
- AccountData: trackerdb.BaseOnlineAccountData{},
- Round: lastUpdateRound,
- Ref: ref,
- UpdRound: basics.Round(updRound),
- }
-
- updatedAccounts = append(updatedAccounts, updated)
- prevAcct = updated
- }
+ return nil, err
+ }
+ var ref trackerdb.OnlineAccountRef
+ ref, err = writer.InsertOnlineAccount(data.address, 0, trackerdb.BaseOnlineAccountData{}, updRound, 0)
+ if err != nil {
+ return nil, err
+ }
+ updated := trackerdb.PersistedOnlineAccountData{
+ Addr: data.address,
+ AccountData: trackerdb.BaseOnlineAccountData{},
+ Round: lastUpdateRound,
+ Ref: ref,
+ UpdRound: basics.Round(updRound),
}
+
+ updatedAccounts = append(updatedAccounts, updated)
+ prevAcct = updated
} else {
if prevAcct.AccountData != newAcct {
var ref trackerdb.OnlineAccountRef
normBalance := newAcct.NormalizedOnlineBalance(proto)
ref, err = writer.InsertOnlineAccount(data.address, normBalance, newAcct, updRound, uint64(newAcct.VoteLastValid))
- if err == nil {
- updated := trackerdb.PersistedOnlineAccountData{
- Addr: data.address,
- AccountData: newAcct,
- Round: lastUpdateRound,
- Ref: ref,
- UpdRound: basics.Round(updRound),
- }
-
- updatedAccounts = append(updatedAccounts, updated)
- prevAcct = updated
+ if err != nil {
+ return nil, err
+ }
+ updated := trackerdb.PersistedOnlineAccountData{
+ Addr: data.address,
+ AccountData: newAcct,
+ Round: lastUpdateRound,
+ Ref: ref,
+ UpdRound: basics.Round(updRound),
}
+
+ updatedAccounts = append(updatedAccounts, updated)
+ prevAcct = updated
}
}
}
-
- if err != nil {
- return
- }
}
}
diff --git a/ledger/acctdeltas_test.go b/ledger/acctdeltas_test.go
index 146a8f1ac..2dc748225 100644
--- a/ledger/acctdeltas_test.go
+++ b/ledger/acctdeltas_test.go
@@ -850,6 +850,7 @@ func TestLookupKeysByPrefix(t *testing.T) {
{key: []byte("DingHo-StandardPack"), value: []byte("5bucks25cents")},
{key: []byte("BostonKitchen-CheeseSlice"), value: []byte("3bucks50cents")},
{key: []byte(`™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`), value: []byte("random Bluh")},
+ {key: []byte(`a-random-box-key`), value: []byte{}},
}
err = dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
@@ -1067,6 +1068,143 @@ func BenchmarkLookupKeyByPrefix(b *testing.B) {
}
}
+func TestKVStoreNilBlobConversion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // +-------------------------------------------------------------+
+ // | Section 1: Create a ledger with tracker DB of user_version 9 |
+ // +-------------------------------------------------------------+
+
+ const inMem = false
+
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
+
+ dbs, dbName := storetesting.DbOpenTest(t, inMem)
+ storetesting.SetDbLogging(t, dbs)
+
+ err := dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ sqlitedriver.AccountsInitTest(t, tx, make(map[basics.Address]basics.AccountData), protocol.ConsensusCurrentVersion)
+ return nil
+ })
+ require.NoError(t, err)
+
+ defer func() {
+ dbs.Close()
+ require.NoError(t, os.Remove(dbName))
+ }()
+
+ targetVersion := int32(10)
+
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err0 error) {
+ _, err0 = tx.ExecContext(ctx, fmt.Sprintf("PRAGMA user_version = %d", targetVersion-1))
+ return
+ })
+ require.NoError(t, err)
+
+ // +-----------------------------------------------------------------+
+ // | ^ Section 1 finishes above |
+ // | |
+ // | Section 2: jam a bunch of key-value pairs with nil values into the DB |
+ // +-----------------------------------------------------------------+
+
+ kvPairDBPrepareSet := []struct{ key []byte }{
+ {key: []byte{0xFF, 0x12, 0x34, 0x56, 0x78}},
+ {key: []byte{0xFF, 0xFF, 0x34, 0x56, 0x78}},
+ {key: []byte{0xFF, 0xFF, 0xFF, 0x56, 0x78}},
+ {key: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0x78}},
+ {key: []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
+ {key: []byte{0xFF, 0xFE, 0xFF}},
+ {key: []byte{0xFF, 0xFF, 0x00, 0xFF, 0xFF}},
+ {key: []byte{0xFF, 0xFF}},
+ {key: []byte{0xBA, 0xDD, 0xAD, 0xFF, 0xFF}},
+ {key: []byte{0xBA, 0xDD, 0xAE, 0x00}},
+ {key: []byte{0xBA, 0xDD, 0xAE}},
+ {key: []byte("TACOCAT")},
+ {key: []byte("TACOBELL")},
+ {key: []byte("DingHo-SmallPack")},
+ {key: []byte("DingHo-StandardPack")},
+ {key: []byte("BostonKitchen-CheeseSlice")},
+ {key: []byte(`™£´´∂ƒ∂ƒßƒ©∑®ƒß∂†¬∆`)},
+ }
+
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err0 error) {
+ writer, err0 := sqlitedriver.MakeAccountsSQLWriter(tx, false, false, true, false)
+ defer writer.Close()
+ if err0 != nil {
+ return
+ }
+ for i := 0; i < len(kvPairDBPrepareSet); i++ {
+ err0 = writer.UpsertKvPair(string(kvPairDBPrepareSet[i].key), nil)
+ if err0 != nil {
+ return
+ }
+ }
+ return
+ })
+ require.NoError(t, err)
+
+ // +---------------------------------------------------------------------------+
+ // | ^ Section 2 finishes above |
+ // | |
+ // | Section 3: Confirm that the tracker DB stores nil values, not anything else |
+ // +---------------------------------------------------------------------------+
+
+ nilRowCounter := func() (nilRowCount int, err error) {
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err0 error) {
+ stmt, err0 := tx.PrepareContext(ctx, "SELECT key FROM kvstore WHERE value IS NULL;")
+ if err0 != nil {
+ return
+ }
+ rows, err0 := stmt.QueryContext(ctx)
+ if err0 != nil {
+ return
+ }
+ for rows.Next() {
+ var key sql.NullString
+ if err0 = rows.Scan(&key); err0 != nil {
+ return
+ }
+ if !key.Valid {
+ err0 = fmt.Errorf("scan from db get invalid key: %#v", key)
+ return
+ }
+ nilRowCount++
+ }
+ return
+ })
+ return
+ }
+
+ nilRowCount, err := nilRowCounter()
+ require.NoError(t, err)
+ require.Equal(t, len(kvPairDBPrepareSet), nilRowCount)
+
+ // +---------------------------------------------------------------------+
+ // | ^ Section 3 finishes above |
+ // | |
+ // | Section 4: Run the migration to replace nils with empty byte slices |
+ // +---------------------------------------------------------------------+
+
+ trackerDBWrapper := sqlitedriver.CreateTrackerSQLStore(dbs)
+ err = trackerDBWrapper.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err0 error) {
+ _, err0 = tx.RunMigrations(ctx, trackerdb.Params{}, log, targetVersion)
+ return
+ })
+ require.NoError(t, err)
+
+ // +------------------------------------------------------------------------------------------------+
+ // | ^ Section 4 finishes above |
+ // | |
+ // | After that, we can confirm the DB migration found all nil values and executed the conversions |
+ // +------------------------------------------------------------------------------------------------+
+
+ nilRowCount, err = nilRowCounter()
+ require.NoError(t, err)
+ require.Equal(t, 0, nilRowCount)
+}
+
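The migration test above hinges on SQL NULL and a zero-length blob being distinct values. With Go's `database/sql`, scanning a NULL blob into a `[]byte` yields a nil slice, while a zero-length blob yields an empty but non-nil slice; a minimal reminder of that distinction in pure Go:

```go
// nil slice vs empty slice: both have length 0, but only one is nil,
// which is how NULL and a zero-length blob stay distinguishable after Scan.
package main

import "fmt"

func main() {
	var null []byte   // what Scan yields for a SQL NULL blob
	empty := []byte{} // what Scan yields for a zero-length blob
	fmt.Println(null == nil, len(null))   // true 0
	fmt.Println(empty == nil, len(empty)) // false 0
}
```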
// upsert updates existing or inserts a new entry
func (a *compactResourcesDeltas) upsert(delta resourceDelta) {
if idx, exist := a.cache[accountCreatable{address: delta.address, index: delta.oldResource.Aidx}]; exist {
@@ -1313,7 +1451,7 @@ func TestLookupAccountAddressFromAddressID(t *testing.T) {
err = dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
arw, err := tx.MakeAccountsReaderWriter()
if err != nil {
- return nil
+ return err
}
for addr, addrid := range addrsids {
@@ -2898,6 +3036,217 @@ func TestOnlineAccountsNewRoundError(t *testing.T) {
require.Empty(t, updated)
}
+type mockAccountsErrorWriter struct {
+}
+
+var errMockAccountsErrorWriterIns = errors.New("synthetic ins err")
+var errMockAccountsErrorWriterUpd = errors.New("synthetic upd err")
+var errMockAccountsErrorWriterDel = errors.New("synthetic del err")
+
+func (w *mockAccountsErrorWriter) InsertAccount(addr basics.Address, normBalance uint64, data trackerdb.BaseAccountData) (ref trackerdb.AccountRef, err error) {
+ return nil, errMockAccountsErrorWriterIns
+}
+func (w *mockAccountsErrorWriter) DeleteAccount(ref trackerdb.AccountRef) (rowsAffected int64, err error) {
+ return 0, errMockAccountsErrorWriterDel
+}
+func (w *mockAccountsErrorWriter) UpdateAccount(ref trackerdb.AccountRef, normBalance uint64, data trackerdb.BaseAccountData) (rowsAffected int64, err error) {
+ return 0, errMockAccountsErrorWriterUpd
+}
+func (w *mockAccountsErrorWriter) InsertResource(accountRef trackerdb.AccountRef, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (ref trackerdb.ResourceRef, err error) {
+ return nil, errMockAccountsErrorWriterIns
+}
+func (w *mockAccountsErrorWriter) DeleteResource(accountRef trackerdb.AccountRef, aidx basics.CreatableIndex) (rowsAffected int64, err error) {
+ return 0, errMockAccountsErrorWriterDel
+}
+func (w *mockAccountsErrorWriter) UpdateResource(accountRef trackerdb.AccountRef, aidx basics.CreatableIndex, data trackerdb.ResourcesData) (rowsAffected int64, err error) {
+ return 0, errMockAccountsErrorWriterUpd
+}
+func (w *mockAccountsErrorWriter) UpsertKvPair(key string, value []byte) error {
+ return errMockAccountsErrorWriterUpd
+}
+func (w *mockAccountsErrorWriter) DeleteKvPair(key string) error {
+ return errMockAccountsErrorWriterDel
+}
+func (w *mockAccountsErrorWriter) InsertCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType, creator []byte) (ref trackerdb.CreatableRef, err error) {
+ return nil, errMockAccountsErrorWriterIns
+}
+func (w *mockAccountsErrorWriter) DeleteCreatable(cidx basics.CreatableIndex, ctype basics.CreatableType) (rowsAffected int64, err error) {
+ return 0, errMockAccountsErrorWriterDel
+}
+func (w *mockAccountsErrorWriter) Close() {
+}
+
+// TestAccountsNewRoundError checks accountsNewRound propagates errors to the caller
+func TestAccountsNewRoundError(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ writer := &mockAccountsErrorWriter{}
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ addrA := ledgertesting.RandomAddress()
+
+ type testcase struct {
+ ad accountDelta
+ rd resourceDelta
+ kd map[string]modifiedKvValue
+ cd map[basics.CreatableIndex]ledgercore.ModifiedCreatable
+ expErr error
+ }
+
+ tests := []testcase{
+ {
+ ad: accountDelta{ // acct A is new
+ address: addrA,
+ newAcct: trackerdb.BaseAccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: 100_000_000},
+ UpdateRound: 1,
+ },
+ },
+ expErr: errMockAccountsErrorWriterIns,
+ },
+ {
+ ad: accountDelta{ // acct A is old, update
+ address: addrA,
+ oldAcct: trackerdb.PersistedAccountData{
+ Addr: addrA,
+ Ref: &mockEntryRef{1},
+ AccountData: trackerdb.BaseAccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: 100_000_000},
+ UpdateRound: 0,
+ },
+ Round: 0,
+ },
+ newAcct: trackerdb.BaseAccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: 100_000_000},
+ UpdateRound: 1,
+ },
+ },
+ expErr: errMockAccountsErrorWriterUpd,
+ },
+ {
+ ad: accountDelta{ // acct A is old, delete
+ address: addrA,
+ oldAcct: trackerdb.PersistedAccountData{
+ Addr: addrA,
+ Ref: &mockEntryRef{1},
+ AccountData: trackerdb.BaseAccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: 100_000_000},
+ UpdateRound: 0,
+ },
+ Round: 0,
+ },
+ newAcct: trackerdb.BaseAccountData{},
+ },
+ expErr: errMockAccountsErrorWriterDel,
+ },
+ {
+ rd: resourceDelta{ // new entry
+ oldResource: trackerdb.PersistedResourcesData{AcctRef: &mockEntryRef{1}},
+ newResource: trackerdb.ResourcesData{
+ Total: 1,
+ SchemaNumUint: 1,
+ },
+ nAcctDeltas: 1,
+ address: addrA,
+ },
+ expErr: errMockAccountsErrorWriterIns,
+ },
+ {
+ rd: resourceDelta{ // existing entry
+ oldResource: trackerdb.PersistedResourcesData{
+ AcctRef: &mockEntryRef{1},
+ Data: trackerdb.ResourcesData{
+ Total: 1,
+ SchemaNumUint: 1,
+ },
+ },
+ newResource: trackerdb.ResourcesData{
+ Total: 2,
+ SchemaNumUint: 2,
+ },
+ nAcctDeltas: 1,
+ address: addrA,
+ },
+ expErr: errMockAccountsErrorWriterUpd,
+ },
+ {
+ rd: resourceDelta{ // deleting entry
+ oldResource: trackerdb.PersistedResourcesData{
+ AcctRef: &mockEntryRef{1},
+ Data: trackerdb.ResourcesData{
+ Total: 2,
+ SchemaNumUint: 2,
+ },
+ },
+ nAcctDeltas: 1,
+ address: addrA,
+ },
+ expErr: errMockAccountsErrorWriterDel,
+ },
+ {
+ kd: map[string]modifiedKvValue{
+ "key1": {
+ data: []byte("value1"),
+ },
+ },
+ expErr: errMockAccountsErrorWriterUpd,
+ },
+ {
+ kd: map[string]modifiedKvValue{
+ "key1": {
+ oldData: []byte("value1"),
+ },
+ },
+ expErr: errMockAccountsErrorWriterDel,
+ },
+ {
+ cd: map[basics.CreatableIndex]ledgercore.ModifiedCreatable{
+ 1: {
+ Created: true,
+ },
+ },
+ expErr: errMockAccountsErrorWriterIns,
+ },
+ {
+ cd: map[basics.CreatableIndex]ledgercore.ModifiedCreatable{
+ 2: {
+ Created: false,
+ },
+ },
+ expErr: errMockAccountsErrorWriterDel,
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
+ updates := compactAccountDeltas{}
+ resources := compactResourcesDeltas{}
+ if test.ad != (accountDelta{}) {
+ updates.deltas = append(updates.deltas, test.ad)
+ }
+ if test.rd.nAcctDeltas != 0 {
+ resources.deltas = append(resources.deltas, test.rd)
+ }
+ var kvs map[string]modifiedKvValue
+ if len(test.kd) != 0 {
+ kvs = test.kd
+ }
+ var creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable
+ if len(test.cd) != 0 {
+ creatables = test.cd
+ }
+ lastUpdateRound := basics.Round(i + 1)
+ updatedAcct, updatedResources, updatedKvs, err := accountsNewRoundImpl(writer, updates, resources, kvs, creatables, proto, lastUpdateRound)
+ require.Error(t, err)
+ require.Equal(t, test.expErr, err)
+ require.Empty(t, updatedAcct)
+ require.Empty(t, updatedResources)
+ require.Empty(t, updatedKvs)
+ })
+ }
+}
+
func randomBaseAccountData() trackerdb.BaseAccountData {
vd := trackerdb.BaseVotingData{
VoteFirstValid: basics.Round(crypto.RandUint64()),
diff --git a/ledger/acctonline.go b/ledger/acctonline.go
index 2eb962f86..a9834d61c 100644
--- a/ledger/acctonline.go
+++ b/ledger/acctonline.go
@@ -23,7 +23,6 @@ import (
"fmt"
"sort"
"sync"
- "sync/atomic"
"time"
"github.com/algorand/go-deadlock"
@@ -34,6 +33,7 @@ import (
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store/trackerdb"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/metrics"
)
@@ -347,7 +347,7 @@ func (ao *onlineAccounts) consecutiveVersion(offset uint64) uint64 {
return offset
}
-func (ao *onlineAccounts) handleUnorderedCommit(dcc *deferredCommitContext) {
+func (ao *onlineAccounts) handleUnorderedCommitOrError(dcc *deferredCommitContext) {
}
func (ao *onlineAccounts) maxBalLookback() uint64 {
@@ -357,6 +357,16 @@ func (ao *onlineAccounts) maxBalLookback() uint64 {
// prepareCommit prepares data to write to the database a "chunk" of rounds, and update the cached dbRound accordingly.
func (ao *onlineAccounts) prepareCommit(dcc *deferredCommitContext) error {
+ err := ao.prepareCommitInternal(dcc)
+ if err != nil {
+ return err
+ }
+
+ return ao.voters.prepareCommit(dcc)
+}
+
+// prepareCommitInternal performs prepareCommit's logic without locking the tracker's mutex.
+func (ao *onlineAccounts) prepareCommitInternal(dcc *deferredCommitContext) error {
offset := dcc.offset
ao.accountsMu.RLock()
@@ -370,13 +380,6 @@ func (ao *onlineAccounts) prepareCommit(dcc *deferredCommitContext) error {
// Index that corresponds to the oldest round still in deltas
startIndex := len(ao.onlineRoundParamsData) - len(ao.deltas) - 1
if ao.onlineRoundParamsData[startIndex+1].CurrentProtocol != ao.onlineRoundParamsData[startIndex+int(offset)].CurrentProtocol {
- // in scheduleCommit, we expect that this function to update the catchpointWriting when
- // it's on a catchpoint round and the node is configured to generate catchpoints. Doing this in a deferred function
- // here would prevent us from "forgetting" to update this variable later on.
- // The same is repeated in commitRound on errors.
- if dcc.catchpointFirstStage && dcc.enableGeneratingCatchpointFiles {
- atomic.StoreInt32(dcc.catchpointDataWriting, 0)
- }
return fmt.Errorf("attempted to commit series of rounds with non-uniform consensus versions")
}
@@ -516,46 +519,71 @@ func (ao *onlineAccounts) postCommit(ctx context.Context, dcc *deferredCommitCon
ao.accountsMu.Unlock()
ao.accountsReadCond.Broadcast()
+
+ ao.voters.postCommit(dcc)
}
func (ao *onlineAccounts) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
}
-// onlineTotals return the total online balance for the given round.
-func (ao *onlineAccounts) onlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
- ao.accountsMu.RLock()
- defer ao.accountsMu.RUnlock()
- return ao.onlineTotalsImpl(rnd)
+// onlineCirculation returns the total online balance for the given round, for use by agreement.
+func (ao *onlineAccounts) onlineCirculation(rnd basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) {
+ // Get cached total stake for rnd
+ totalStake, proto, err := ao.onlineTotals(rnd)
+ if err != nil {
+ return basics.MicroAlgos{}, err
+ }
+
+ // Check if we need to subtract expired stake
+ if params := config.Consensus[proto]; params.ExcludeExpiredCirculation {
+ // Handle case when the balanceRound() used by agreement is 0, resulting in rnd=0.
+ // Agreement will ask us for the circulation at round 0 for the first 320 blocks.
+ // In this case, we don't subtract expired stake, since we are still using genesis balances.
+ // Agreement will later ask us for the balance of round 1 when the voteRnd is 321.
+ if rnd == 0 {
+ return totalStake, nil
+ }
+ expiredStake, err := ao.ExpiredOnlineCirculation(rnd, voteRnd)
+ if err != nil {
+ return basics.MicroAlgos{}, err
+ }
+ ot := basics.OverflowTracker{}
+ totalStake = ot.SubA(totalStake, expiredStake)
+ if ot.Overflowed {
+ return basics.MicroAlgos{}, fmt.Errorf("onlineTotals: overflow subtracting %v from %v", expiredStake, totalStake)
+ }
+ }
+ return totalStake, nil
}
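`onlineCirculation` subtracts expired stake with a checked subtraction so an underflow is reported rather than silently wrapped. A stand-alone sketch of the overflow-tracker pattern; `basics.OverflowTracker` behaves along these lines, but this version is only illustrative:

```go
// Checked subtraction: the tracker records underflow so callers can reject
// the result instead of returning a wrapped-around stake total.
package main

import "fmt"

type overflowTracker struct{ overflowed bool }

func (t *overflowTracker) sub(a, b uint64) uint64 {
	if b > a {
		t.overflowed = true
		return 0
	}
	return a - b
}

func circulation(total, expired uint64) (uint64, error) {
	ot := overflowTracker{}
	online := ot.sub(total, expired)
	if ot.overflowed {
		return 0, fmt.Errorf("overflow subtracting %d from %d", expired, total)
	}
	return online, nil
}

func main() {
	fmt.Println(circulation(1000, 250)) // 750 <nil>
	fmt.Println(circulation(250, 1000)) // 0 with an overflow error
}
```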
// onlineTotalsEx returns the total online balance for the given round for an extended rounds range
// by looking into the DB
func (ao *onlineAccounts) onlineTotalsEx(rnd basics.Round) (basics.MicroAlgos, error) {
- ao.accountsMu.RLock()
- totalsOnline, err := ao.onlineTotalsImpl(rnd)
- ao.accountsMu.RUnlock()
+ totalsOnline, _, err := ao.onlineTotals(rnd)
if err == nil {
- return totalsOnline, err
+ return totalsOnline, nil
}
var roundOffsetError *RoundOffsetError
if !errors.As(err, &roundOffsetError) {
- ao.log.Errorf("onlineTotalsImpl error: %v", err)
+ ao.log.Errorf("onlineTotals error: %v", err)
}
totalsOnline, err = ao.accountsq.LookupOnlineTotalsHistory(rnd)
return totalsOnline, err
}
-// onlineTotalsImpl returns the online totals of all accounts at the end of round rnd.
-func (ao *onlineAccounts) onlineTotalsImpl(rnd basics.Round) (basics.MicroAlgos, error) {
+// onlineTotals returns the online totals of all accounts at the end of round rnd.
+func (ao *onlineAccounts) onlineTotals(rnd basics.Round) (basics.MicroAlgos, protocol.ConsensusVersion, error) {
+ ao.accountsMu.RLock()
+ defer ao.accountsMu.RUnlock()
offset, err := ao.roundParamsOffset(rnd)
if err != nil {
- return basics.MicroAlgos{}, err
+ return basics.MicroAlgos{}, "", err
}
onlineRoundParams := ao.onlineRoundParamsData[offset]
- return basics.MicroAlgos{Raw: onlineRoundParams.OnlineSupply}, nil
+ return basics.MicroAlgos{Raw: onlineRoundParams.OnlineSupply}, onlineRoundParams.CurrentProtocol, nil
}
// LookupOnlineAccountData returns the online account data for a given address at a given round.
@@ -803,6 +831,7 @@ func (ao *onlineAccounts) TopOnlineAccounts(rnd basics.Round, voteRnd basics.Rou
if !(d.VoteFirstValid <= voteRnd && voteRnd <= d.VoteLastValid) {
modifiedAccounts[addr] = nil
invalidOnlineAccounts[addr] = accountDataToOnline(addr, &d, genesisProto)
+
continue
}
@@ -916,6 +945,21 @@ func (ao *onlineAccounts) TopOnlineAccounts(rnd basics.Round, voteRnd basics.Rou
if err != nil {
return nil, basics.MicroAlgos{}, err
}
+
+ // If set, return total online stake minus all future expired stake by voteRnd
+ if params.ExcludeExpiredCirculation {
+ expiredStake, err := ao.ExpiredOnlineCirculation(rnd, voteRnd)
+ if err != nil {
+ return nil, basics.MicroAlgos{}, err
+ }
+ ot := basics.OverflowTracker{}
+ onlineStake := ot.SubA(totalOnlineStake, expiredStake)
+ if ot.Overflowed {
+ return nil, basics.MicroAlgos{}, fmt.Errorf("TopOnlineAccounts: overflow subtracting ExpiredOnlineCirculation: %d - %d", totalOnlineStake, expiredStake)
+ }
+ return topOnlineAccounts, onlineStake, nil
+ }
+
ot := basics.OverflowTracker{}
for _, oa := range invalidOnlineAccounts {
totalOnlineStake = ot.SubA(totalOnlineStake, oa.MicroAlgos)
@@ -935,5 +979,113 @@ func (ao *onlineAccounts) TopOnlineAccounts(rnd basics.Round, voteRnd basics.Rou
}
}
+func (ao *onlineAccounts) onlineAcctsExpiredByRound(rnd, voteRnd basics.Round) (map[basics.Address]*ledgercore.OnlineAccountData, error) {
+ needUnlock := false
+ defer func() {
+ if needUnlock {
+ ao.accountsMu.RUnlock()
+ }
+ }()
+
+ var expiredAccounts map[basics.Address]*ledgercore.OnlineAccountData
+ ao.accountsMu.RLock()
+ needUnlock = true
+ for {
+ currentDbRound := ao.cachedDBRoundOnline
+ currentDeltaLen := len(ao.deltas)
+ offset, err := ao.roundOffset(rnd)
+ if err != nil {
+ var roundOffsetError *RoundOffsetError
+ if !errors.As(err, &roundOffsetError) {
+ return nil, err
+ }
+ // roundOffsetError was returned, so the round number cannot be found in deltas; it is in history.
+ // This means offset will be 0 and ao.deltas[:offset] will be an empty slice.
+ }
+ paramsOffset, err := ao.roundParamsOffset(rnd)
+ if err != nil {
+ return nil, err
+ }
+
+ rewardsParams := config.Consensus[ao.onlineRoundParamsData[paramsOffset].CurrentProtocol]
+ rewardsLevel := ao.onlineRoundParamsData[paramsOffset].RewardsLevel
+
+ // Step 1: get all online accounts from DB for rnd
+ // Not unlocking ao.accountsMu yet, to stay consistent with Step 2
+ var dbRound basics.Round
+ err = ao.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) {
+ ar, err := tx.MakeAccountsReader()
+ if err != nil {
+ return err
+ }
+ expiredAccounts, err = ar.ExpiredOnlineAccountsForRound(rnd, voteRnd, rewardsParams, rewardsLevel)
+ if err != nil {
+ return err
+ }
+ dbRound, err = ar.AccountsRound()
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // If dbRound has advanced beyond the last read of ao.cachedDBRoundOnline, postCommit has
+ // occurred since then, so wait until deltas is consistent with dbRound and try again.
+ if dbRound > currentDbRound {
+ // the database round is ahead of the last ao.cachedDBRoundOnline we sampled.
+ for currentDbRound >= ao.cachedDBRoundOnline && currentDeltaLen == len(ao.deltas) {
+ ao.accountsReadCond.Wait()
+ }
+ continue // retry (restart for loop)
+ }
+ if dbRound < currentDbRound {
+ ao.log.Errorf("onlineAccounts.onlineAcctsExpiredByRound: database round %d is behind in-memory round %d", dbRound, currentDbRound)
+ return nil, &StaleDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDbRound}
+ }
+
+ // Step 2: Apply pending changes for each block in deltas
+ // Iterate through per-round deltas up to offset: target round `rnd` is ao.deltas[offset-1].
+ for o := uint64(0); o < offset; o++ {
+ for i := 0; i < ao.deltas[o].Len(); i++ {
+ addr, d := ao.deltas[o].GetByIdx(i)
+ // Each round's deltas can insert, update, or delete entries in the expiredAccounts map.
+ // Note, VoteFirstValid is not checked here on purpose since the current implementation does not allow
+ // setting VoteFirstValid in the future.
+ if d.Status == basics.Online && d.VoteLastValid != 0 && voteRnd > d.VoteLastValid {
+ // Online expired: insert or overwrite the old data in expiredAccounts.
+ oadata := d.OnlineAccountData(rewardsParams, rewardsLevel)
+ expiredAccounts[addr] = &oadata
+ } else {
+ // addr went offline rather than expiring, so do not report it as an expired ONLINE account.
+ delete(expiredAccounts, addr)
+ }
+ }
+ }
+ break // successfully retrieved onlineAccts from DB & deltas
+ }
+ ao.accountsMu.RUnlock()
+ needUnlock = false
+
+ return expiredAccounts, nil
+}
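+
+// The retry loop above follows the tracker's usual optimistic-read pattern: sample
+// cachedDBRoundOnline and len(deltas) under the read lock, read from a DB snapshot, and if
+// the committed round advanced underneath us, wait on accountsReadCond and retry so that
+// the DB state and the in-memory deltas being merged describe the same round.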
+
+// ExpiredOnlineCirculation returns the total online stake for accounts with participation keys registered
+// at round `rnd` that are expired by round `voteRnd`.
+func (ao *onlineAccounts) ExpiredOnlineCirculation(rnd, voteRnd basics.Round) (basics.MicroAlgos, error) {
+ expiredAccounts, err := ao.onlineAcctsExpiredByRound(rnd, voteRnd)
+ if err != nil {
+ return basics.MicroAlgos{}, err
+ }
+ ot := basics.OverflowTracker{}
+ expiredStake := basics.MicroAlgos{}
+ for _, d := range expiredAccounts {
+ expiredStake = ot.AddA(expiredStake, d.MicroAlgosWithRewards)
+ if ot.Overflowed {
+ return basics.MicroAlgos{}, fmt.Errorf("ExpiredOnlineCirculation: overflow totaling expired stake")
+ }
+ }
+ return expiredStake, nil
+}
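+
+// The tests in this change rely on the resulting identity: when ExcludeExpiredCirculation
+// is set, OnlineCirculation plus ExpiredOnlineCirculation reproduces the raw onlineTotals
+// value for the same round.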
+
var ledgerAccountsonlinetopCount = metrics.NewCounter("ledger_accountsonlinetop_count", "calls")
var ledgerAccountsonlinetopMicros = metrics.NewCounter("ledger_accountsonlinetop_micros", "µs spent")
diff --git a/ledger/acctonline_expired_test.go b/ledger/acctonline_expired_test.go
new file mode 100644
index 000000000..0645d0dd7
--- /dev/null
+++ b/ledger/acctonline_expired_test.go
@@ -0,0 +1,689 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "encoding/binary"
+ "math/rand"
+ "os"
+ "strconv"
+ "testing"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/txntest"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+// onlineAcctModel provides a simple interface for tracking accounts
+// as they come online, go offline, and change their amount of stake.
+// It is implemented by a real ledger (doubleLedgerAcctModel) for testing
+// against a reference implementation (mapOnlineAcctModel).
+type onlineAcctModel interface {
+ currentRound() basics.Round
+ nextRound()
+ advanceToRound(rnd basics.Round)
+ goOnline(addr basics.Address, firstvalid, lastvalid basics.Round)
+ goOffline(addr basics.Address)
+ updateStake(addr basics.Address, stake basics.MicroAlgos)
+ teardown()
+
+ LookupAgreement(rnd basics.Round, addr basics.Address) onlineAcctModelAcct
+ OnlineCirculation(rnd basics.Round, voteRnd basics.Round) basics.MicroAlgos
+ ExpiredOnlineCirculation(rnd, voteRnd basics.Round) basics.MicroAlgos
+}
+
+// mapOnlineAcctModel provides a reference implementation for tracking online accounts used
+// for testing TopOnlineAccounts, ExpiredOnlineCirculation, and onlineAcctsExpiredByRound.
+// It is an oracle that the doubleLedgerAcctModel is compared against.
+type mapOnlineAcctModel struct {
+ t *testing.T
+ cur basics.Round
+ accts map[basics.Address]map[basics.Round]onlineAcctModelAcct
+ expiring map[basics.Round]map[basics.Address]struct{}
+}
+
+type onlineAcctModelAcct struct {
+ Status basics.Status
+ VoteFirstValid, VoteLastValid basics.Round
+ Stake basics.MicroAlgos
+}
+
+func newMapOnlineAcctModel(t *testing.T) *mapOnlineAcctModel {
+ return &mapOnlineAcctModel{
+ t: t,
+ accts: make(map[basics.Address]map[basics.Round]onlineAcctModelAcct),
+ expiring: make(map[basics.Round]map[basics.Address]struct{}),
+ }
+}
+
+func (m *mapOnlineAcctModel) teardown() {}
+func (m *mapOnlineAcctModel) currentRound() basics.Round { return m.cur }
+func (m *mapOnlineAcctModel) nextRound() { m.cur++ }
+func (m *mapOnlineAcctModel) advanceToRound(rnd basics.Round) {
+ if rnd == m.cur {
+ return
+ }
+ require.Greater(m.t, rnd, m.cur, "cannot advance to previous round")
+ m.cur = rnd
+}
+
+func (m *mapOnlineAcctModel) lookupAcctAsOf(rnd basics.Round, addr basics.Address) onlineAcctModelAcct {
+ require.LessOrEqual(m.t, rnd, m.cur, "cannot lookup acct for future round")
+ acctRounds, ok := m.accts[addr]
+ if !ok {
+ return onlineAcctModelAcct{}
+ }
+ // find the acct record for the most recent round <= rnd
+ for r := rnd; ; r-- {
+ if acct, ok := acctRounds[r]; ok {
+ return acct
+ }
+ if r == 0 { // basics.Round is unsigned, so guard the 0 bound explicitly rather than testing r >= 0
+ break
+ }
+ }
+ // not found
+ return onlineAcctModelAcct{}
+}
+
+func (m *mapOnlineAcctModel) LookupAgreement(rnd basics.Round, addr basics.Address) onlineAcctModelAcct {
+ return m.lookupAcctAsOf(rnd, addr)
+}
+
+// look up all online accounts as of the given round
+func (m *mapOnlineAcctModel) allOnlineAsOf(rnd basics.Round) map[basics.Address]onlineAcctModelAcct {
+ require.LessOrEqual(m.t, rnd, m.cur, "cannot lookup acct for future round")
+ accts := make(map[basics.Address]onlineAcctModelAcct)
+ for addr, acctRounds := range m.accts {
+ // find the acct record for the most recent round <= rnd
+ for r := rnd; ; r-- {
+ if acct, ok := acctRounds[r]; ok {
+ if acct.Status == basics.Online {
+ accts[addr] = acct
+ }
+ // found the most recent round <= rnd, so stop looking
+ // we will break even if the acct is offline
+ break
+ }
+ if r == 0 { // basics.Round is unsigned, so guard the 0 bound explicitly rather than testing r >= 0
+ break
+ }
+ }
+ }
+ return accts
+}
+
+func (m *mapOnlineAcctModel) OnlineCirculation(rnd basics.Round, voteRnd basics.Round) basics.MicroAlgos {
+ accts := m.allOnlineAsOf(rnd)
+ return m.sumAcctStake(accts)
+}
+
+func (m *mapOnlineAcctModel) ExpiredOnlineCirculation(rnd, voteRnd basics.Round) basics.MicroAlgos {
+ accts := m.onlineAcctsExpiredByRound(rnd, voteRnd)
+ return m.sumAcctStake(accts)
+}
+
+func (m *mapOnlineAcctModel) sumAcctStake(accts map[basics.Address]onlineAcctModelAcct) basics.MicroAlgos {
+ algops := MicroAlgoOperations{a: require.New(m.t)}
+ var ret basics.MicroAlgos
+ for _, acct := range accts {
+ ret = algops.Add(ret, acct.Stake)
+ }
+ return ret
+}
+
+func (m *mapOnlineAcctModel) setAcct(rnd basics.Round, addr basics.Address, acct onlineAcctModelAcct) {
+ require.Equal(m.t, rnd, m.cur, "cannot set acct for round other than current round")
+
+ acctRounds, ok := m.accts[addr]
+ if !ok {
+ acctRounds = make(map[basics.Round]onlineAcctModelAcct)
+ }
+ acctRounds[rnd] = acct
+ m.accts[addr] = acctRounds
+}
+
+func (m *mapOnlineAcctModel) goOnline(addr basics.Address, firstvalid, lastvalid basics.Round) {
+ rnd := m.cur
+ oldAcct := m.lookupAcctAsOf(rnd, addr)
+
+ // if the account is already online, remove the old lastvalid round from the expiring map
+ if oldAcct.Status == basics.Online {
+ require.Contains(m.t, m.expiring, oldAcct.VoteLastValid, "round should be in expiring map")
+ require.Contains(m.t, m.expiring[oldAcct.VoteLastValid], addr, "address should be in expiring map")
+ delete(m.expiring[oldAcct.VoteLastValid], addr)
+ }
+
+ // create new acct record
+ newAcct := onlineAcctModelAcct{
+ Status: basics.Online,
+ VoteFirstValid: firstvalid,
+ VoteLastValid: lastvalid,
+ Stake: oldAcct.Stake,
+ }
+ m.setAcct(rnd, addr, newAcct)
+
+ // remember when this account will expire
+ expiring, ok := m.expiring[lastvalid]
+ if !ok {
+ expiring = make(map[basics.Address]struct{})
+ }
+ expiring[addr] = struct{}{}
+ m.expiring[lastvalid] = expiring
+
+}
+
+func (m *mapOnlineAcctModel) goOffline(addr basics.Address) {
+ rnd := m.cur
+ oldAcct := m.lookupAcctAsOf(rnd, addr)
+
+ // must already be online: remove old lastvalid round from expiring map
+ require.Equal(m.t, basics.Online, oldAcct.Status, "cannot go offline if not online")
+ require.Contains(m.t, m.expiring, oldAcct.VoteLastValid, "round should be in expiring map")
+ require.Contains(m.t, m.expiring[oldAcct.VoteLastValid], addr, "address should be in expiring map")
+ delete(m.expiring[oldAcct.VoteLastValid], addr)
+
+ newAcct := onlineAcctModelAcct{
+ Status: basics.Offline,
+ VoteFirstValid: 0,
+ VoteLastValid: 0,
+ Stake: oldAcct.Stake,
+ }
+ m.setAcct(rnd, addr, newAcct)
+}
+
+func (m *mapOnlineAcctModel) updateStake(addr basics.Address, stake basics.MicroAlgos) {
+ rnd := m.cur
+ acct := m.lookupAcctAsOf(rnd, addr)
+ acct.Stake = stake
+ m.setAcct(rnd, addr, acct)
+}
+
+func (m *mapOnlineAcctModel) onlineAcctsExpiredByRound(rnd, voteRnd basics.Round) map[basics.Address]onlineAcctModelAcct {
+ require.LessOrEqual(m.t, rnd, m.cur, "cannot lookup expired accts for future round")
+
+ // get all online addresses as of rnd
+ ret := make(map[basics.Address]onlineAcctModelAcct)
+ for addr, acct := range m.allOnlineAsOf(rnd) {
+ require.NotZero(m.t, acct.VoteLastValid, "offline acct returned by allOnlineAsOf")
+ // will this acct be expired by voteRnd?
+ if voteRnd > acct.VoteLastValid {
+ ret[addr] = acct
+ }
+ }
+ return ret
+}
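+
+// For example, with cur = 1000: an account online at rnd whose VoteLastValid is 999 is
+// reported as expired for voteRnd = 1000 (1000 > 999), while one with VoteLastValid = 1000
+// is still valid at voteRnd and is not reported.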
+
+// doubleLedgerAcctModel implements an onlineAcctModel using DoubleLedger, which starts up two
+// Ledger instances, a generator and a validator.
+type doubleLedgerAcctModel struct {
+ t testing.TB
+ params *config.ConsensusParams
+ dl *DoubleLedger
+ ops *MicroAlgoOperations
+ genAddrs []basics.Address
+ genBalances bookkeeping.GenesisBalances
+ genSecrets []*crypto.SignatureSecrets
+ // new accounts made by goOnline, balance value tracks uncommitted balance changes before dl.endBlock()
+ accts map[basics.Address]basics.MicroAlgos
+}
+
+func newDoubleLedgerAcctModel(t testing.TB, proto protocol.ConsensusVersion, inMem bool) *doubleLedgerAcctModel {
+ // set 1 Algo for rewards pool size -- rewards math not supported by newMapOnlineAcctModel
+ genesisOpt := ledgertesting.TestGenesisRewardsPoolSize(basics.MicroAlgos{Raw: 1_000_000})
+ genBalances, genAddrs, genSecrets := ledgertesting.NewTestGenesis(genesisOpt)
+ cfg := config.GetDefaultLocal()
+ opts := []simpleLedgerOption{simpleLedgerNotArchival()}
+ if !inMem {
+ opts = append(opts, simpleLedgerOnDisk())
+ }
+ dl := NewDoubleLedger(t, genBalances, proto, cfg, opts...)
+ dl.beginBlock()
+ params := config.Consensus[proto]
+ return &doubleLedgerAcctModel{
+ t: t,
+ params: &params,
+ ops: &MicroAlgoOperations{a: require.New(t)},
+ dl: &dl,
+ genAddrs: genAddrs,
+ genBalances: genBalances,
+ genSecrets: genSecrets,
+ accts: make(map[basics.Address]basics.MicroAlgos),
+ }
+}
+
+func (m *doubleLedgerAcctModel) teardown() { m.dl.Close() }
+
+func (m *doubleLedgerAcctModel) nextRound() {
+ m.dl.endBlock()
+ m.dl.beginBlock()
+}
+
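+// currentRound reports the round currently being assembled: the model always keeps a block
+// open between beginBlock and endBlock, so it is one past the latest committed round.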
+func (m *doubleLedgerAcctModel) currentRound() basics.Round {
+ genRound := m.dl.generator.Latest()
+ valRound := m.dl.validator.Latest()
+ require.Equal(m.t, genRound, valRound)
+ return genRound + 1
+}
+
+func (m *doubleLedgerAcctModel) advanceToRound(rnd basics.Round) {
+ if rnd == m.currentRound() {
+ return
+ }
+ require.Greater(m.t, rnd, m.currentRound(), "cannot advance to previous round")
+ for m.currentRound() < rnd {
+ m.nextRound()
+ }
+ require.Equal(m.t, rnd, m.currentRound())
+}
+
+const doubleLedgerAcctModelAcctInitialBalance = 1_234_567
+
+func (m *doubleLedgerAcctModel) goOnline(addr basics.Address, firstvalid, lastvalid basics.Round) {
+ if _, ok := m.accts[addr]; !ok {
+ // not yet in the ledger: send 1 algo from a genesis account
+ m.dl.txn(&txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: m.genAddrs[0],
+ Receiver: addr,
+ Amount: doubleLedgerAcctModelAcctInitialBalance,
+ })
+ m.accts[addr] = basics.MicroAlgos{Raw: doubleLedgerAcctModelAcctInitialBalance}
+ }
+
+ require.NotZero(m.t, addr, "cannot go online with zero address")
+
+ minFee := m.params.MinTxnFee // subtract minFee from account balance
+ m.dl.txn(&txntest.Txn{
+ Type: protocol.KeyRegistrationTx,
+ Sender: addr,
+ VoteFirst: firstvalid,
+ VoteLast: lastvalid,
+ Fee: minFee,
+
+ Nonparticipation: false, // XXX test nonparticipating accounts
+
+ // meaningless non-zero voting data
+ VotePK: crypto.OneTimeSignatureVerifier(addr),
+ SelectionPK: crypto.VRFVerifier(addr),
+ VoteKeyDilution: 1024,
+ })
+ m.accts[addr] = m.ops.Sub(m.accts[addr], basics.MicroAlgos{Raw: minFee})
+}
+
+func (m *doubleLedgerAcctModel) goOffline(addr basics.Address) {
+ require.Contains(m.t, m.accts, addr, "cannot go offline with unknown address")
+
+ minFee := m.params.MinTxnFee // subtract minFee from account balance
+ m.dl.txn(&txntest.Txn{
+ Type: protocol.KeyRegistrationTx,
+ Sender: addr,
+ Fee: minFee,
+
+ // not necessary to specify
+ VoteFirst: 0,
+ VoteLast: 0,
+ VotePK: crypto.OneTimeSignatureVerifier{},
+ SelectionPK: crypto.VRFVerifier{},
+ VoteKeyDilution: 0,
+ })
+ m.accts[addr] = m.ops.Sub(m.accts[addr], basics.MicroAlgos{Raw: minFee})
+}
+
+func (m *doubleLedgerAcctModel) updateStake(addr basics.Address, amount basics.MicroAlgos) {
+ curStake := m.accts[addr]
+ require.GreaterOrEqual(m.t, amount.Raw, curStake.Raw, "currently cannot decrease stake")
+ if amount == curStake {
+ return
+ }
+ if amount.Raw > curStake.Raw {
+ sendAmt := m.ops.Sub(amount, curStake)
+ // send more algo from a genesis account
+ m.dl.txn(&txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: m.genAddrs[0],
+ Receiver: addr,
+ Amount: sendAmt.Raw,
+ Fee: m.params.MinTxnFee,
+ })
+ m.accts[addr] = amount
+ m.t.Logf("updateStake addr %s sent %d, bal %d", addr, sendAmt, amount)
+ }
+}
+
+// OnlineCirculation returns the total online stake this model produced at rnd, while
+// also asserting that the validator and generator Ledgers agree, and that the different
+// Ledger/tracker methods used to retrieve and calculate the stake internally agree.
+func (m *doubleLedgerAcctModel) OnlineCirculation(rnd basics.Round, voteRnd basics.Round) basics.MicroAlgos {
+ valTotal, err := m.dl.validator.OnlineTotalStake(rnd)
+ require.NoError(m.t, err)
+ genTotal, err := m.dl.generator.OnlineTotalStake(rnd)
+ require.NoError(m.t, err)
+ require.Equal(m.t, valTotal, genTotal)
+
+ valStake, err := m.dl.validator.OnlineCirculation(rnd, voteRnd)
+ require.NoError(m.t, err)
+ genStake, err := m.dl.generator.OnlineCirculation(rnd, voteRnd)
+ require.NoError(m.t, err)
+ require.Equal(m.t, valStake, genStake)
+
+ // If ExcludeExpiredCirculation is set, this means OnlineCirculation
+ // has already subtracted the expired stake. So to get the total, add
+ // it back in by querying ExpiredOnlineCirculation.
+ if m.params.ExcludeExpiredCirculation {
+ expiredStake := m.ExpiredOnlineCirculation(rnd, rnd+320)
+ valStake = m.ops.Add(valStake, expiredStake)
+ }
+
+ // This should equal the value of onlineTotalsImpl(rnd) which provides
+ // the total online stake without subtracting expired stake.
+ require.Equal(m.t, valTotal, valStake)
+
+ return valStake
+}
+
+// OnlineTotalStake is a wrapper to access onlineAccounts.onlineTotalsImpl safely.
+func (l *Ledger) OnlineTotalStake(rnd basics.Round) (basics.MicroAlgos, error) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+ totalStake, _, err := l.acctsOnline.onlineTotals(rnd)
+ return totalStake, err
+}
+
+// ExpiredOnlineCirculation is a wrapper to call onlineAccounts.ExpiredOnlineCirculation safely.
+func (l *Ledger) ExpiredOnlineCirculation(rnd, voteRnd basics.Round) (basics.MicroAlgos, error) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+ return l.acctsOnline.ExpiredOnlineCirculation(rnd, voteRnd)
+}
+
+// ExpiredOnlineCirculation returns the total expired stake this model produced at rnd,
+// while also asserting that the validator and generator Ledgers agree.
+func (m *doubleLedgerAcctModel) ExpiredOnlineCirculation(rnd, voteRnd basics.Round) basics.MicroAlgos {
+ valStake, err := m.dl.validator.ExpiredOnlineCirculation(rnd, voteRnd)
+ require.NoError(m.t, err)
+ genStake, err := m.dl.generator.ExpiredOnlineCirculation(rnd, voteRnd)
+ require.NoError(m.t, err)
+ require.Equal(m.t, valStake, genStake)
+ return valStake
+}
+
+func (m *doubleLedgerAcctModel) LookupAgreement(rnd basics.Round, addr basics.Address) onlineAcctModelAcct {
+ valAcct, err := m.dl.validator.LookupAgreement(rnd, addr)
+ require.NoError(m.t, err)
+ genAcct, err := m.dl.generator.LookupAgreement(rnd, addr)
+ require.NoError(m.t, err)
+ require.Equal(m.t, valAcct, genAcct)
+
+ status := basics.Offline
+ if valAcct.VoteLastValid > 0 || valAcct.VoteFirstValid > 0 {
+ status = basics.Online
+ }
+ return onlineAcctModelAcct{
+ VoteFirstValid: valAcct.VoteFirstValid,
+ VoteLastValid: valAcct.VoteLastValid,
+ Status: status,
+ Stake: valAcct.MicroAlgosWithRewards,
+ }
+}
+
+//nolint:paralleltest // don't want to parallelize this test
+func TestOnlineAcctModelSimple(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // first test using the in-memory model
+ t.Run("Map", func(t *testing.T) {
+ m := newMapOnlineAcctModel(t)
+ testOnlineAcctModelSimple(t, m)
+ })
+ // test same scenario on double ledger
+ t.Run("DoubleLedger", func(t *testing.T) {
+ m := newDoubleLedgerAcctModel(t, protocol.ConsensusFuture, true)
+ defer m.teardown()
+ testOnlineAcctModelSimple(t, m)
+ })
+}
+
+func testOnlineAcctModelSimple(t *testing.T, m onlineAcctModel) {
+ // acct 1 has 10 algos expiring at round 2000
+ m.goOnline(basics.Address{1}, 1, 2000)
+ m.updateStake(basics.Address{1}, basics.MicroAlgos{Raw: 10_000_000})
+ // acct 2 has 11 algos expiring at round 999
+ m.goOnline(basics.Address{2}, 1, 999)
+ m.updateStake(basics.Address{2}, basics.MicroAlgos{Raw: 11_000_000})
+
+ m.advanceToRound(500)
+ // acct 3 has 11.1 algos expiring at round 2500
+ m.goOnline(basics.Address{3}, 500, 2500)
+ m.updateStake(basics.Address{3}, basics.MicroAlgos{Raw: 11_100_000})
+
+ m.advanceToRound(600)
+ // acct 4 has 11.11 algos expiring at round 900
+ m.goOnline(basics.Address{4}, 600, 900)
+ m.updateStake(basics.Address{4}, basics.MicroAlgos{Raw: 11_110_000})
+
+ m.advanceToRound(1000)
+ // total stake is all 4 accounts: 10 + 11 + 11.1 + 11.11 = 43.21 algos
+ a := require.New(t)
+ onlineStake := m.OnlineCirculation(680, 1000)
+ a.Equal(basics.MicroAlgos{Raw: 43_210_000}, onlineStake)
+
+ // expired stake is acct 2 + acct 4: 11 + 11.11 = 22.11 algos (VoteLastValid 999 and 900 are both < 1000)
+ expiredStake := m.ExpiredOnlineCirculation(680, 1000)
+ a.Equal(basics.MicroAlgos{Raw: 22_110_000}, expiredStake)
+}
+
+// An onlineScenario is a list of actions to take at each round, which are
+// applied to the onlineAcctModel implementations (real and oracle) being tested.
+type onlineScenario struct {
+ // roundActions is a list of actions to take in each round, must be in rnd order
+ roundActions []onlineScenarioRound
+}
+
+type onlineScenarioRound struct {
+ rnd basics.Round
+ actions []onlineScenarioRoundAction
+}
+
+// An onlineScenarioRoundAction is an action to take on an onlineAcctModel in a given round.
+type onlineScenarioRoundAction interface {
+ apply(t *testing.T, m onlineAcctModel)
+}
+
+type goOnlineWithStakeAction struct {
+ addr basics.Address
+ fv, lv basics.Round
+ stake uint64
+}
+
+func (a goOnlineWithStakeAction) apply(t *testing.T, m onlineAcctModel) {
+ m.goOnline(a.addr, a.fv, a.lv)
+ m.updateStake(a.addr, basics.MicroAlgos{Raw: a.stake})
+}
+
+type goOfflineAction struct{ addr basics.Address }
+
+func (a goOfflineAction) apply(t *testing.T, m onlineAcctModel) { m.goOffline(a.addr) }
+
+type updateStakeAction struct {
+ addr basics.Address
+ stake uint64
+}
+
+func (a updateStakeAction) apply(t *testing.T, m onlineAcctModel) {
+ m.updateStake(a.addr, basics.MicroAlgos{Raw: a.stake})
+}
+
+type checkOnlineStakeAction struct {
+ rnd, voteRnd basics.Round
+ online, expired uint64
+}
+
+func (a checkOnlineStakeAction) apply(t *testing.T, m onlineAcctModel) {
+ onlineStake := m.OnlineCirculation(a.rnd, a.voteRnd)
+ expiredStake := m.ExpiredOnlineCirculation(a.rnd, a.voteRnd)
+ require.Equal(t, basics.MicroAlgos{Raw: a.online}, onlineStake, "round %d, cur %d", a.rnd, m.currentRound())
+ require.Equal(t, basics.MicroAlgos{Raw: a.expired}, expiredStake, "rnd %d voteRnd %d, cur %d", a.rnd, a.voteRnd, m.currentRound())
+}
+
+// simpleOnlineScenario is the same as the TestOnlineAcctModelSimple test
+// but expressed as an onlineScenario
+var simpleOnlineScenario = onlineScenario{
+ roundActions: []onlineScenarioRound{
+ {1, []onlineScenarioRoundAction{
+ // acct 1 has 10 algos expiring at round 2000
+ goOnlineWithStakeAction{basics.Address{1}, 1, 2000, 10_000_000},
+ // acct 2 has 11 algos expiring at round 999
+ goOnlineWithStakeAction{basics.Address{2}, 1, 999, 11_000_000},
+ }},
+ {500, []onlineScenarioRoundAction{
+ // acct 3 has 11.1 algos expiring at round 2500
+ goOnlineWithStakeAction{basics.Address{3}, 500, 2500, 11_100_000},
+ }},
+ {600, []onlineScenarioRoundAction{
+ // acct 4 has 11.11 algos expiring at round 900
+ goOnlineWithStakeAction{basics.Address{4}, 600, 900, 11_110_000},
+ }},
+ {681, []onlineScenarioRoundAction{
+ // total stake is all 4 accounts
+ // expired stake is acct 2 + acct 4
+ checkOnlineStakeAction{680, 1000, 43_210_000, 22_110_000},
+ }},
+ {1000, []onlineScenarioRoundAction{
+ // check total & expired stake again at round 1000, should be the same
+ checkOnlineStakeAction{680, 1000, 43_210_000, 22_110_000},
+ }},
+ },
+}
+
+// shift1AlgoBy returns 1 Algo (1_000_000 µAlgos) shifted left by n, so every account gets a
+// distinct power-of-two stake and any sum identifies exactly whose balances are included
+func shift1AlgoBy(n uint64) uint64 { return 1_000_000 << n }
+
+// simpleOfflineOnlineScenario is like simpleOnlineScenario but with acct 2
+// going from online+expired to offline at round 999.
+var simpleOfflineOnlineScenario = onlineScenario{
+ roundActions: []onlineScenarioRound{
+ {1, []onlineScenarioRoundAction{
+ goOnlineWithStakeAction{basics.Address{1}, 1, 2000, shift1AlgoBy(1)},
+ goOnlineWithStakeAction{basics.Address{2}, 1, 999, shift1AlgoBy(2)},
+ }},
+ {500, []onlineScenarioRoundAction{
+ goOnlineWithStakeAction{basics.Address{3}, 500, 2500, shift1AlgoBy(3)},
+ }},
+ {600, []onlineScenarioRoundAction{
+ goOnlineWithStakeAction{basics.Address{4}, 600, 900, shift1AlgoBy(4)}, // expired by 1000
+ }},
+ {679, []onlineScenarioRoundAction{
+ goOnlineWithStakeAction{basics.Address{5}, 679, 999, shift1AlgoBy(5)}, // expired by 1000
+ goOfflineAction{basics.Address{2}}, // was going to expire at 999 but now is offline
+ }},
+ {680, []onlineScenarioRoundAction{
+ goOnlineWithStakeAction{basics.Address{6}, 680, 999, shift1AlgoBy(6)}, // expired by 1000
+ goOnlineWithStakeAction{basics.Address{7}, 680, 1000, shift1AlgoBy(7)},
+ }},
+ {1000, []onlineScenarioRoundAction{
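+ // online at rnd 680: accts 1,3,4,5,6,7 = (2+8+16+32+64+128) million = 250 million µAlgos
+ // (acct 2 went offline at 679); expired by voteRnd 1000: accts 4,5,6 (VoteLastValid
+ // 900, 999, 999) = (16+32+64) million = 112 million µAlgos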
+ checkOnlineStakeAction{680, 1000, 250_000_000, 112_000_000},
+ }},
+ },
+}
+
+//nolint:paralleltest // don't want to parallelize this test
+func TestOnlineAcctModelScenario(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ runScenario := func(t *testing.T, m onlineAcctModel, s onlineScenario) {
+ for _, ra := range s.roundActions {
+ m.advanceToRound(ra.rnd)
+ for _, action := range ra.actions {
+ action.apply(t, m)
+ }
+ }
+ }
+
+ for _, tc := range []struct {
+ name string
+ scenario onlineScenario
+ }{
+ {"Simple", simpleOnlineScenario},
+ {"SimpleOffline", simpleOfflineOnlineScenario},
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ // first test using the in-memory model
+ t.Run("Map", func(t *testing.T) {
+ m := newMapOnlineAcctModel(t)
+ runScenario(t, m, tc.scenario)
+ })
+ // test same scenario on double ledger
+ t.Run("DoubleLedger", func(t *testing.T) {
+ m := newDoubleLedgerAcctModel(t, protocol.ConsensusFuture, true)
+ defer m.teardown()
+ runScenario(t, m, tc.scenario)
+ })
+ })
+ }
+}
+
+func BenchmarkExpiredOnlineCirculation(b *testing.B) {
+ // set up totalAccounts online accounts in 10k batches
+ totalAccounts := 100_000
+ const maxKeyregPerBlock = 10_000
+ // if the TOTAL_ACCOUNTS env var is set, override totalAccounts
+ if n, err := strconv.Atoi(os.Getenv("TOTAL_ACCOUNTS")); err == nil {
+ b.Logf("using %d accounts", n)
+ if n%maxKeyregPerBlock != 0 {
+ b.Fatalf("TOTAL_ACCOUNTS %d must be a multiple of %d", n, maxKeyregPerBlock)
+ }
+ totalAccounts = n
+ }
+
+ proto := protocol.ConsensusFuture
+ m := newDoubleLedgerAcctModel(b, proto, false)
+ defer m.teardown()
+
+ addrFromUint64 := func(n uint64) basics.Address {
+ var addr basics.Address
+ binary.BigEndian.PutUint64(addr[:], n)
+ return addr
+ }
+
+ var blockCounter, acctCounter uint64
+ for i := 0; i < totalAccounts/maxKeyregPerBlock; i++ {
+ blockCounter++
+ for j := 0; j < maxKeyregPerBlock; j++ {
+ acctCounter++
+ // go online for a random number of rounds in [400, 1600)
+ validFor := 400 + uint64(rand.Intn(1200))
+ m.goOnline(addrFromUint64(acctCounter), basics.Round(blockCounter), basics.Round(blockCounter+validFor))
+ }
+ b.Log("built block", blockCounter, "accts", acctCounter)
+ m.nextRound()
+ }
+ // then advance ~1K rounds to exercise accounts going offline
+ m.advanceToRound(basics.Round(blockCounter + 1000))
+ b.Log("advanced to round", m.currentRound())
+
+ b.ResetTimer()
+ for i := uint64(0); i < uint64(b.N); i++ {
+ // query expired circulation across the available range (last 320 rounds, from ~680 to ~1000)
+ startRnd := m.currentRound() - 320
+ offset := basics.Round(i % 320)
+ _, err := m.dl.validator.ExpiredOnlineCirculation(startRnd+offset, startRnd+offset+320)
+ require.NoError(b, err)
+ //total, err := m.dl.validator.OnlineTotalStake(startRnd + offset)
+ //b.Log("expired circulation", startRnd+offset, startRnd+offset+320, "returned", expiredStake, "total", total)
+ }
+}
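+
+// Example invocation (the values are illustrative, not prescriptive):
+//
+//	TOTAL_ACCOUNTS=200000 go test ./ledger -run '^$' -bench ExpiredOnlineCirculation -benchtime 10x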
diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go
index 7360f30e9..0bee1f35f 100644
--- a/ledger/acctonline_test.go
+++ b/ledger/acctonline_test.go
@@ -21,6 +21,7 @@ import (
"database/sql"
"fmt"
"sort"
+ "strconv"
"testing"
"time"
@@ -129,7 +130,7 @@ func newBlock(t *testing.T, ml *mockLedgerForTracker, testProtocolVersion protoc
delta.Accts.MergeAccounts(updates)
delta.Totals = newTotals
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
return newTotals
}
@@ -777,11 +778,11 @@ func TestAcctOnlineRoundParamsCache(t *testing.T) {
delta.Totals = accumulateTotals(t, consensusVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
allTotals[i] = delta.Totals
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
if i > basics.Round(maxBalLookback) && i%10 == 0 {
- onlineTotal, err := ao.onlineTotals(i - basics.Round(maxBalLookback))
+ onlineTotal, err := ao.onlineCirculation(i-basics.Round(maxBalLookback), i)
require.NoError(t, err)
require.Equal(t, allTotals[i-basics.Round(maxBalLookback)].Online.Money, onlineTotal)
expectedConsensusVersion := testProtocolVersion1
@@ -822,7 +823,7 @@ func TestAcctOnlineRoundParamsCache(t *testing.T) {
require.Equal(t, ao.onlineRoundParamsData[:basics.Round(maxBalLookback)], dbOnlineRoundParams)
for i := ml.Latest() - basics.Round(maxBalLookback); i < ml.Latest(); i++ {
- onlineTotal, err := ao.onlineTotals(i)
+ onlineTotal, err := ao.onlineCirculation(i, i+basics.Round(maxBalLookback))
require.NoError(t, err)
require.Equal(t, allTotals[i].Online.Money, onlineTotal)
}
@@ -1367,7 +1368,9 @@ func addSinkAndPoolAccounts(genesisAccts []map[basics.Address]basics.AccountData
func newBlockWithUpdates(genesisAccts []map[basics.Address]basics.AccountData, updates ledgercore.AccountDeltas, prevTotals ledgercore.AccountTotals, t *testing.T, ml *mockLedgerForTracker, round int, oa *onlineAccounts) ledgercore.AccountTotals {
base := genesisAccts[0]
- newTotals := newBlock(t, ml, protocol.ConsensusCurrentVersion, config.Consensus[protocol.ConsensusCurrentVersion], basics.Round(round), base, updates, prevTotals)
+ proto := ml.GenesisProtoVersion()
+ params := ml.GenesisProto()
+ newTotals := newBlock(t, ml, proto, params, basics.Round(round), base, updates, prevTotals)
commitSync(t, oa, ml, basics.Round(round))
return newTotals
}
@@ -1415,13 +1418,14 @@ func TestAcctOnlineTop(t *testing.T) {
genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
addSinkAndPoolAccounts(genesisAccts)
- ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, genesisAccts)
+ // run this test on ConsensusV37 rules, run TestAcctOnlineTop_ChangeOnlineStake on current
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusV37, genesisAccts)
defer ml.Close()
conf := config.GetDefaultLocal()
au, oa := newAcctUpdates(t, ml, conf)
defer oa.close()
- initialOnlineTotals, err := oa.onlineTotals(0)
+ initialOnlineTotals, err := oa.onlineCirculation(0, basics.Round(oa.maxBalLookback()))
a.NoError(err)
top := compareOnlineTotals(a, oa, 0, 0, 5, initialOnlineTotals, initialOnlineTotals)
compareTopAccounts(a, top, allAccts)
@@ -1484,7 +1488,20 @@ func TestAcctOnlineTop(t *testing.T) {
func TestAcctOnlineTopInBatches(t *testing.T) {
partitiontest.PartitionTest(t)
- a := require.New(t)
+
+ intToAddress := func(n int) basics.Address {
+ var addr basics.Address
+ pos := 0
+ for {
+ addr[pos] = byte(n % 10)
+ n /= 10
+ if n == 0 {
+ break
+ }
+ pos++
+ }
+ return addr
+ }
const numAccts = 2048
allAccts := make([]basics.BalanceRecord, numAccts)
@@ -1493,11 +1510,11 @@ func TestAcctOnlineTopInBatches(t *testing.T) {
for i := 0; i < numAccts; i++ {
allAccts[i] = basics.BalanceRecord{
- Addr: ledgertesting.RandomAddress(),
+ Addr: intToAddress(i + 1),
AccountData: basics.AccountData{
MicroAlgos: basics.MicroAlgos{Raw: uint64(i + 1)},
Status: basics.Online,
- VoteLastValid: 1000,
+ VoteLastValid: basics.Round(i + 1),
VoteFirstValid: 0,
RewardsBase: 0},
}
@@ -1505,17 +1522,55 @@ func TestAcctOnlineTopInBatches(t *testing.T) {
}
addSinkAndPoolAccounts(genesisAccts)
- ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, genesisAccts)
- defer ml.Close()
+ for _, proto := range []protocol.ConsensusVersion{protocol.ConsensusV36, protocol.ConsensusFuture} {
+ t.Run(string(proto), func(t *testing.T) {
+ a := require.New(t)
+ params := config.Consensus[proto]
+ ml := makeMockLedgerForTracker(t, true, 1, proto, genesisAccts)
+ defer ml.Close()
- conf := config.GetDefaultLocal()
- _, oa := newAcctUpdates(t, ml, conf)
- defer oa.close()
+ conf := config.GetDefaultLocal()
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- top, _, err := oa.TopOnlineAccounts(0, 0, 2048, &proto, 0)
- a.NoError(err)
- compareTopAccounts(a, top, allAccts)
+ top, totalOnlineStake, err := oa.TopOnlineAccounts(0, 0, numAccts, &params, 0)
+ a.NoError(err)
+ compareTopAccounts(a, top, allAccts)
+ a.Equal(basics.MicroAlgos{Raw: 2048 * 2049 / 2}, totalOnlineStake)
+
+ // add 300 blocks so the first 299 accounts (VoteLastValid 1..299) expire by voteRnd 300;
+ // in the last block, put the 299th account offline to trigger the TopOnlineAccounts behavior difference
+ _, totals, err := au.LatestTotals()
+ a.NoError(err)
+ acct299 := allAccts[298]
+ for i := 1; i <= 300; i++ {
+ var updates ledgercore.AccountDeltas
+ if i == 300 {
+ updates.Upsert(acct299.Addr, ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline, MicroAlgos: acct299.MicroAlgos},
+ VotingData: ledgercore.VotingData{},
+ })
+ }
+ newBlockWithUpdates(genesisAccts, updates, totals, t, ml, i, oa)
+ }
+ a.Equal(basics.Round(300), oa.latest())
+
+ // 299 accts expired at voteRnd = 300
+ top, totalOnlineStake, err = oa.TopOnlineAccounts(0, 300, numAccts, &params, 0)
+ a.NoError(err)
+ compareTopAccounts(a, top, allAccts)
+ a.Equal(basics.MicroAlgos{Raw: 2048*2049/2 - 299*300/2}, totalOnlineStake)
+
+ // check the behavior difference between ConsensusV36 and ConsensusFuture
+ var correction uint64
+ if proto == protocol.ConsensusV36 {
+ correction = acct299.MicroAlgos.Raw
+ }
+ _, totalOnlineStake, err = oa.TopOnlineAccounts(300, 300, numAccts, &params, 0)
+ a.NoError(err)
+ a.Equal(basics.MicroAlgos{Raw: 2048*2049/2 - 299*300/2 - correction}, totalOnlineStake)
+ })
+ }
}
func TestAcctOnlineTopBetweenCommitAndPostCommit(t *testing.T) {
@@ -1700,7 +1755,7 @@ func TestAcctOnlineTopDBBehindMemRound(t *testing.T) {
a.Contains(err.Error(), "is behind in-memory round")
case <-time.After(1 * time.Minute):
- a.FailNow("timedout while waiting for post commit")
+ a.FailNow("timeout while waiting for post commit")
}
}
@@ -1763,7 +1818,8 @@ func TestAcctOnlineTop_ChangeOnlineStake(t *testing.T) {
totals = newBlockWithUpdates(genesisAccts, updates, totals, t, ml, i, oa)
}
- initialOnlineStake, err := oa.onlineTotals(0)
+ params := config.Consensus[protocol.ConsensusCurrentVersion]
+ initialOnlineStake, err := oa.onlineCirculation(0, basics.Round(params.MaxBalLookback))
a.NoError(err)
rnd15TotalOnlineStake := algops.Sub(initialOnlineStake, allAccts[15].MicroAlgos) // 15 is offline
@@ -1782,7 +1838,7 @@ func TestAcctOnlineTop_ChangeOnlineStake(t *testing.T) {
voteRndExpectedStake = algops.Sub(voteRndExpectedStake, allAccts[18].MicroAlgos) // Online on rnd but not valid on voteRnd
updatedAccts[15].Status = basics.Offline // Mark account 15 offline for comparison
updatedAccts[18].Status = basics.Offline // Mark account 18 offline for comparison
- top = compareOnlineTotals(a, oa, 18, 19, 5, rnd15TotalOnlineStake, voteRndExpectedStake)
+ top = compareOnlineTotals(a, oa, 18, 19, 5, voteRndExpectedStake, voteRndExpectedStake)
compareTopAccounts(a, top, updatedAccts)
}
@@ -1808,9 +1864,318 @@ func compareOnlineTotals(a *require.Assertions, oa *onlineAccounts, rnd, voteRnd
top, onlineTotalVoteRnd, err := oa.TopOnlineAccounts(rnd, voteRnd, n, &proto, 0)
a.NoError(err)
a.Equal(expectedForVoteRnd, onlineTotalVoteRnd)
- onlineTotalsRnd, err := oa.onlineTotals(rnd)
+ onlineTotalsRnd, err := oa.onlineCirculation(rnd, voteRnd)
a.NoError(err)
a.Equal(expectedForRnd, onlineTotalsRnd)
a.LessOrEqual(onlineTotalVoteRnd.Raw, onlineTotalsRnd.Raw)
return top
}
+
+// TestAcctOnline_ExpiredOnlineCirculation mutates online state in deltas and DB
+// to ensure ExpiredOnlineCirculation returns expected online stake value
+// The test exercises all possible combinations for offline, online and expired values for two accounts.
+func TestAcctOnline_ExpiredOnlineCirculation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ algops := MicroAlgoOperations{a: a}
+
+ // powInt is a helper function to calculate powers of uint64
+ powInt := func(x, y uint64) uint64 {
+ ret := uint64(1)
+ if x == 0 {
+ return ret
+ }
+ for i := uint64(0); i < y; i++ {
+ ret *= x
+ }
+ return ret
+ }
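+ // e.g. powInt(3, conf.MaxAcctLookback) below counts the base-3 delta combinations:
+ // with MaxAcctLookback = 4 that is 3^4 = 81 delta states for each committed-DB state.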
+
+ // add some genesis online accounts with stake 1, 10, 20, 30... in order to see which account's
+ // stake is not included in the results while debugging
+ const numAccts = 20
+ allAccts := make([]basics.BalanceRecord, numAccts)
+ genesisAccts := []map[basics.Address]basics.AccountData{{}}
+ genesisAccts[0] = make(map[basics.Address]basics.AccountData, numAccts)
+ totalStake := basics.MicroAlgos{Raw: 0}
+ for i := 0; i < numAccts-1; i++ {
+ stake := i * 10
+ if stake == 0 {
+ stake = 1
+ }
+ allAccts[i] = basics.BalanceRecord{
+ Addr: ledgertesting.RandomAddress(),
+ AccountData: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: uint64(stake)},
+ Status: basics.Online,
+ VoteLastValid: 10000,
+ VoteFirstValid: 0,
+ RewardsBase: 0},
+ }
+ genesisAccts[0][allAccts[i].Addr] = allAccts[i].AccountData
+ totalStake = algops.Add(totalStake, allAccts[i].MicroAlgos)
+ }
+
+ addSinkAndPoolAccounts(genesisAccts)
+
+ proto := protocol.ConsensusFuture
+ params := config.Consensus[proto]
+ ml := makeMockLedgerForTracker(t, true, 1, proto, genesisAccts)
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.MaxAcctLookback = 4 // technically the test works for any value of MaxAcctLookback, but larger values take too long
+ // t.Logf("Running MaxAcctLookback=%d", conf.MaxAcctLookback)
+ au, oa := newAcctUpdates(t, ml, conf)
+ defer oa.close()
+
+ // close commitSyncer goroutine to prevent possible race between commitSyncer and commitSync
+ ml.trackers.ctxCancel()
+ ml.trackers.ctxCancel = nil
+ <-ml.trackers.commitSyncerClosed
+ ml.trackers.commitSyncerClosed = nil
+
+ // initial precondition checks on online stake
+ _, totals, err := au.LatestTotals()
+ a.NoError(err)
+ a.Equal(totalStake, totals.Online.Money)
+ initialOnlineStake, err := oa.onlineCirculation(0, basics.Round(oa.maxBalLookback()))
+ a.NoError(err)
+ a.Equal(totalStake, initialOnlineStake)
+ initialExpired, err := oa.ExpiredOnlineCirculation(0, 1000)
+ a.NoError(err)
+ a.Equal(basics.MicroAlgos{Raw: 0}, initialExpired)
+
+ type dbState uint64
+ const (
+ dbOffline dbState = iota
+ dbOnline
+ dbOnlineExpired
+ )
+
+ type deltaState uint64
+ const (
+ deltaNoChange deltaState = iota
+ deltaOffpired // offline (addrA) or expired (addrB)
+ deltaOnline
+ )
+
+ type acctState uint64
+ const (
+ acctStateUnknown acctState = iota
+ acctStateOffline
+ acctStateOnline
+ acctStateExpired
+ )
+
+ // take the first two accounts for the test - 0 and 1 - with stake 1 and 10 respectively
+ addrA := allAccts[0].Addr
+ stakeA := allAccts[0].MicroAlgos
+ statesA := map[acctState]ledgercore.AccountData{
+ acctStateOffline: {AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline, MicroAlgos: stakeA}, VotingData: ledgercore.VotingData{}},
+ acctStateOnline: {AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online, MicroAlgos: stakeA}, VotingData: ledgercore.VotingData(allAccts[0].OnlineAccountData().VotingData)},
+ }
+
+ addrB := allAccts[1].Addr
+ stakeB := allAccts[1].MicroAlgos
+ votingDataB := allAccts[1].OnlineAccountData().VotingData
+ statesB := map[acctState]ledgercore.AccountData{
+ acctStateOffline: {AccountBaseData: ledgercore.AccountBaseData{Status: basics.Offline, MicroAlgos: stakeB}, VotingData: ledgercore.VotingData{}},
+ acctStateOnline: {AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online, MicroAlgos: stakeB}, VotingData: ledgercore.VotingData(votingDataB)},
+ }
+ expStatesB := func(state acctState, voteRnd basics.Round) ledgercore.AccountData {
+ vd := ledgercore.VotingData(votingDataB)
+ switch state {
+ case acctStateExpired:
+ vd.VoteLastValid = voteRnd - 1
+ case acctStateOnline:
+ vd.VoteLastValid = voteRnd + 1
+ default:
+ a.Fail("invalid acct state")
+ }
+ return ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online, MicroAlgos: stakeB},
+ VotingData: vd,
+ }
+ }
+
+ // try all possible online/offline delta states for account A
+ // try all possible valid/expired VoteLastValid for account B
+ // - generate {offline, online, online-expired} db states (two rounds committed) for account A and B
+ // - generate all combinations of deltaState {not changed, offline/expired, online} of size conf.MaxAcctLookback arrays
+ // - test all combinations in 3^2 * 3^conf.MaxAcctLookback tests
+ rnd := basics.Round(1)
+ accounts := []map[basics.Address]basics.AccountData{genesisAccts[0]} // base state
+ dbStates := []dbState{dbOffline, dbOnline, dbOnlineExpired}
+ deltaStates := []deltaState{deltaNoChange, deltaOffpired, deltaOnline}
+ const dbRoundsToCommit = 2
+ for dbCombo := uint64(0); dbCombo < powInt(uint64(len(dbStates)), dbRoundsToCommit); dbCombo++ {
+ for deltaCombo := uint64(0); deltaCombo < powInt(uint64(len(deltaStates)), conf.MaxAcctLookback); deltaCombo++ {
+ var stateA acctState
+ var stateB acctState
+
+ ternDb := strconv.FormatUint(dbCombo, 3)
+ ternDb = fmt.Sprintf("%0*s", dbRoundsToCommit, ternDb)
+
+ ternDelta := strconv.FormatUint(deltaCombo, 3)
+ ternDelta = fmt.Sprintf("%0*s", conf.MaxAcctLookback, ternDelta)
+ // uncomment for debugging
+ // t.Logf("db=%d|delta=%d <==> older->%s<-db top | first->%s<-last", dbCombo, deltaCombo, ternDb, ternDelta)
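+ // worked example: dbCombo = 5 -> FormatUint(5, 3) = "12", zero-padded to dbRoundsToCommit
+ // digits = "12": the first committed round gets state 1 (dbOnline), the second gets
+ // state 2 (dbOnlineExpired), and dbSeed (the last digit) ends up dbOnlineExpired.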
+
+ targetVoteRnd := rnd +
+ basics.Round(conf.MaxAcctLookback) /* all deltas */ +
+ 2 /* db state committed */ +
+ basics.Round(params.MaxBalLookback)
+
+ // mutate the committed state
+ // addrA, addrB: offline, online not expired, online expired
+ dbSeed := dbState(9999) // not initialized
+ for i := uint64(0); i < dbRoundsToCommit; i++ {
+ combo := ternDb[i]
+ d, err := strconv.Atoi(string(combo))
+ a.NoError(err)
+ if i == dbRoundsToCommit-1 {
+ dbSeed = dbState(d)
+ }
+
+ var updates ledgercore.AccountDeltas
+ switch dbState(d) {
+ case dbOffline:
+ updates.Upsert(addrA, statesA[acctStateOffline])
+ updates.Upsert(addrB, statesB[acctStateOffline])
+ case dbOnline:
+ updates.Upsert(addrA, statesA[acctStateOnline])
+ updates.Upsert(addrB, statesB[acctStateOnline])
+ case dbOnlineExpired:
+ state := statesA[acctStateOnline]
+ state.VoteLastValid = targetVoteRnd - 1
+ updates.Upsert(addrA, state)
+ state = statesB[acctStateOnline]
+ state.VoteLastValid = targetVoteRnd - 1
+ updates.Upsert(addrB, state)
+ default:
+ a.Fail("unknown db state")
+ }
+ base := accounts[rnd-1]
+ accounts = append(accounts, applyPartialDeltas(base, updates))
+ totals = newBlock(t, ml, proto, params, rnd, base, updates, totals)
+ rnd++
+ }
+
+ // assert on expected online totals
+ switch dbSeed {
+ case dbOffline:
+ // both accounts are offline, decrease the original stake
+ a.Equal(initialOnlineStake.Raw-(stakeA.Raw+stakeB.Raw), totals.Online.Money.Raw)
+ case dbOnline, dbOnlineExpired: // being expired does not decrease the stake
+ a.Equal(initialOnlineStake, totals.Online.Money)
+ }
+
+ // mutate in-memory state
+ for i := uint64(0); i < conf.MaxAcctLookback; i++ {
+ combo := ternDelta[i]
+ d, err := strconv.Atoi(string(combo))
+ a.NoError(err)
+
+ var updates ledgercore.AccountDeltas
+ switch deltaState(d) {
+ case deltaNoChange:
+ case deltaOffpired:
+ updates.Upsert(addrA, statesA[acctStateOffline])
+ updates.Upsert(addrB, expStatesB(acctStateExpired, targetVoteRnd))
+ stateA = acctStateOffline
+ stateB = acctStateExpired
+ case deltaOnline:
+ updates.Upsert(addrA, statesA[acctStateOnline])
+ updates.Upsert(addrB, expStatesB(acctStateOnline, targetVoteRnd))
+ stateA = acctStateOnline
+ stateB = acctStateOnline
+
+ default:
+ a.Fail("unknown delta seed")
+ }
+ base := accounts[rnd-1]
+ accounts = append(accounts, applyPartialDeltas(base, updates))
+ totals = newBlock(t, ml, proto, params, rnd, base, updates, totals)
+ rnd++
+ }
+
+ commitSync(t, oa, ml, basics.Round(rnd-1))
+ a.Equal(int(conf.MaxAcctLookback), len(oa.deltas)) // ensure only the expected deltas remain unflushed
+
+ var expiredAccts map[basics.Address]*ledgercore.OnlineAccountData
+ err = ml.trackers.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error {
+ reader, err := tx.MakeAccountsReader()
+ if err != nil {
+ return err
+ }
+ expiredAccts, err = reader.ExpiredOnlineAccountsForRound(rnd-1, targetVoteRnd, params, 0)
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+ a.NoError(err)
+
+ if dbSeed == dbOffline || dbSeed == dbOnline {
+ a.Empty(expiredAccts)
+ } else {
+ a.Len(expiredAccts, 2)
+ for _, acct := range expiredAccts {
+ a.NotZero(acct.VoteLastValid)
+ }
+ }
+
+ expectedExpiredStake := basics.MicroAlgos{}
+ // if both A and B were offline or online in DB then the expired stake is changed only if account is expired in deltas
+ // => check if B expired
+ // if both A and B were expired in DB then the expired stake is changed when any of them goes offline or online
+ // => check if A or B are offline or online
+ switch dbSeed {
+ case dbOffline, dbOnline:
+ if stateB == acctStateExpired {
+ expectedExpiredStake.Raw += stakeB.Raw
+ }
+ case dbOnlineExpired:
+ expectedExpiredStake.Raw += stakeA.Raw
+ expectedExpiredStake.Raw += stakeB.Raw
+ if stateA == acctStateOnline || stateA == acctStateOffline {
+ expectedExpiredStake.Raw -= stakeA.Raw
+ }
+ if stateB == acctStateOnline || stateB == acctStateOffline {
+ expectedExpiredStake.Raw -= stakeB.Raw
+ }
+ default:
+ a.Fail("unknown db seed")
+ }
+ a.Equal(targetVoteRnd, rnd+basics.Round(params.MaxBalLookback))
+ _, err := oa.ExpiredOnlineCirculation(rnd, targetVoteRnd)
+ a.Error(err)
+ a.Contains(err.Error(), fmt.Sprintf("round %d too high", rnd))
+ expiredStake, err := oa.ExpiredOnlineCirculation(rnd-1, targetVoteRnd)
+ a.NoError(err)
+ a.Equal(expectedExpiredStake, expiredStake)
+
+ // restore the original state of accounts A and B
+ updates := ledgercore.AccountDeltas{}
+ base := accounts[rnd-1]
+ updates.Upsert(addrA, statesA[acctStateOnline])
+ updates.Upsert(addrB, ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{Status: basics.Online, MicroAlgos: stakeB}, VotingData: ledgercore.VotingData(votingDataB),
+ })
+ accounts = append(accounts, applyPartialDeltas(base, updates))
+ totals = newBlock(t, ml, proto, params, rnd, base, updates, totals)
+ rnd++
+ // add conf.MaxAcctLookback empty blocks to flush/restore the original state
+ for i := uint64(0); i < conf.MaxAcctLookback; i++ {
+ var updates ledgercore.AccountDeltas
+ base = accounts[rnd-1]
+ accounts = append(accounts, base)
+ totals = newBlock(t, ml, proto, params, rnd, base, updates, totals)
+ rnd++
+ }
+ commitSync(t, oa, ml, basics.Round(rnd-1))
+ a.Equal(int(conf.MaxAcctLookback), len(oa.deltas))
+ }
+ }
+}
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index 20431bce7..5640800ce 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -24,7 +24,6 @@ import (
"sort"
"strings"
"sync"
- "sync/atomic"
"time"
"github.com/algorand/go-deadlock"
@@ -839,7 +838,12 @@ func (aul *accountUpdatesLedgerEvaluator) GenesisProto() config.ConsensusParams
// VotersForStateProof returns the top online accounts at round rnd.
func (aul *accountUpdatesLedgerEvaluator) VotersForStateProof(rnd basics.Round) (voters *ledgercore.VotersForRound, err error) {
- return aul.ao.voters.getVoters(rnd)
+ return aul.ao.voters.VotersForStateProof(rnd)
+}
+
+func (aul *accountUpdatesLedgerEvaluator) GetStateProofVerificationContext(_ basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ // Since state proof transaction is not being verified (we only apply the change) during replay, we don't need to implement this function at the moment.
+ return nil, fmt.Errorf("accountUpdatesLedgerEvaluator: GetStateProofVerificationContext, needed for state proof verification, is not implemented in accountUpdatesLedgerEvaluator")
}
// BlockHdr returns the header of the given round. When the evaluator is running, it's only referring to the previous header, which is what we
@@ -901,20 +905,6 @@ func (aul *accountUpdatesLedgerEvaluator) GetCreatorForRound(rnd basics.Round, c
return aul.au.getCreatorForRound(rnd, cidx, ctype, false /* don't sync */)
}
-// onlineTotals returns the online totals of all accounts at the end of round rnd.
-// used in tests only
-func (au *accountUpdates) onlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
- au.accountsMu.RLock()
- defer au.accountsMu.RUnlock()
- offset, err := au.roundOffset(rnd)
- if err != nil {
- return basics.MicroAlgos{}, err
- }
-
- totals := au.roundTotals[offset]
- return totals.Online.Money, nil
-}
-
// latestTotalsImpl returns the totals of all accounts for the most recent round, as well as the round number
func (au *accountUpdates) latestTotalsImpl() (basics.Round, ledgercore.AccountTotals, error) {
offset := len(au.deltas)
@@ -1611,7 +1601,7 @@ func (au *accountUpdates) roundOffset(rnd basics.Round) (offset uint64, err erro
return off, nil
}
-func (au *accountUpdates) handleUnorderedCommit(dcc *deferredCommitContext) {
+func (au *accountUpdates) handleUnorderedCommitOrError(dcc *deferredCommitContext) {
}
// prepareCommit prepares data to write to the database a "chunk" of rounds, and update the cached dbRound accordingly.
@@ -1634,14 +1624,6 @@ func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error {
// verify version correctness : all the entries in the au.versions[1:offset+1] should have the *same* version, and the committedUpTo should be enforcing that.
if au.versions[1] != au.versions[offset] {
au.accountsMu.RUnlock()
-
- // in scheduleCommit, we expect that this function to update the catchpointWriting when
- // it's on a catchpoint round and the node is configured to generate catchpoints. Doing this in a deferred function
- // here would prevent us from "forgetting" to update this variable later on.
- // The same is repeated in commitRound on errors.
- if dcc.catchpointFirstStage && dcc.enableGeneratingCatchpointFiles {
- atomic.StoreInt32(dcc.catchpointDataWriting, 0)
- }
return fmt.Errorf("attempted to commit series of rounds with non-uniform consensus versions")
}
@@ -1673,14 +1655,6 @@ func (au *accountUpdates) commitRound(ctx context.Context, tx trackerdb.Transact
offset := dcc.offset
dbRound := dcc.oldBase
- defer func() {
- if err != nil {
- if dcc.catchpointFirstStage && dcc.enableGeneratingCatchpointFiles {
- atomic.StoreInt32(dcc.catchpointDataWriting, 0)
- }
- }
- }()
-
_, err = tx.ResetTransactionWarnDeadline(ctx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset)))
if err != nil {
return err
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index ff9ff9efe..0faaa4de6 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -35,7 +35,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store/trackerdb"
"github.com/algorand/go-algorand/ledger/store/trackerdb/sqlitedriver"
@@ -44,6 +44,7 @@ import (
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/db"
+ "github.com/algorand/go-deadlock"
)
var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
@@ -60,10 +61,26 @@ type mockLedgerForTracker struct {
consensusVersion protocol.ConsensusVersion
accts map[basics.Address]basics.AccountData
+ mu deadlock.RWMutex
+
// trackerRegistry manages persistence into DB so we have to have it here even for a single tracker test
trackers trackerRegistry
}
+// onlineTotals returns the online totals of all accounts at the end of round rnd.
+// used in tests only
+func (au *accountUpdates) onlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
+ au.accountsMu.RLock()
+ defer au.accountsMu.RUnlock()
+ offset, err := au.roundOffset(rnd)
+ if err != nil {
+ return basics.MicroAlgos{}, err
+ }
+
+ totals := au.roundTotals[offset]
+ return totals.Online.Money, nil
+}
+
func accumulateTotals(t testing.TB, consensusVersion protocol.ConsensusVersion, accts []map[basics.Address]ledgercore.AccountData, rewardLevel uint64) (totals ledgercore.AccountTotals) {
var ot basics.OverflowTracker
proto := config.Consensus[consensusVersion]
@@ -182,17 +199,29 @@ func (ml *mockLedgerForTracker) Close() {
}
func (ml *mockLedgerForTracker) Latest() basics.Round {
+ ml.mu.RLock()
+ defer ml.mu.RUnlock()
return basics.Round(len(ml.blocks)) - 1
}
-func (ml *mockLedgerForTracker) addMockBlock(be blockEntry, delta ledgercore.StateDelta) error {
+func (ml *mockLedgerForTracker) addBlock(be blockEntry, delta ledgercore.StateDelta) {
+ ml.addToBlockQueue(be, delta)
+ ml.trackers.newBlock(be.block, delta)
+}
+
+func (ml *mockLedgerForTracker) addToBlockQueue(be blockEntry, delta ledgercore.StateDelta) {
+ ml.mu.Lock()
+ defer ml.mu.Unlock()
+
ml.blocks = append(ml.blocks, be)
ml.deltas = append(ml.deltas, delta)
- return nil
}
-func (ml *mockLedgerForTracker) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger internal.LedgerForEvaluator) (ledgercore.StateDelta, error) {
- // support returning the deltas if the client explicitly provided them by calling addMockBlock, otherwise,
+func (ml *mockLedgerForTracker) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger eval.LedgerForEvaluator) (ledgercore.StateDelta, error) {
+ ml.mu.RLock()
+ defer ml.mu.RUnlock()
+
+ // support returning the deltas if the client explicitly provided them by calling addToBlockQueue, otherwise,
// just return an empty state delta ( since the client clearly didn't care about these )
if len(ml.deltas) > int(blk.Round()) {
return ml.deltas[uint64(blk.Round())], nil
@@ -207,6 +236,9 @@ func (ml *mockLedgerForTracker) Block(rnd basics.Round) (bookkeeping.Block, erro
return bookkeeping.Block{}, fmt.Errorf("rnd %d out of bounds", rnd)
}
+ ml.mu.Lock()
+ defer ml.mu.Unlock()
+
return ml.blocks[int(rnd)].block, nil
}
@@ -215,6 +247,9 @@ func (ml *mockLedgerForTracker) BlockHdr(rnd basics.Round) (bookkeeping.BlockHea
return bookkeeping.BlockHeader{}, fmt.Errorf("rnd %d out of bounds", rnd)
}
+ ml.mu.RLock()
+ defer ml.mu.RUnlock()
+
return ml.blocks[int(rnd)].block.BlockHeader, nil
}
@@ -304,7 +339,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, ao *onlineAccounts, base
require.Equal(t, latestRnd, latest)
// the log has "onlineAccounts failed to fetch online totals for rnd" warning that is expected
- _, err := ao.onlineTotals(latest + 1)
+ _, err := ao.onlineCirculation(latest+1, latest+1+basics.Round(ao.maxBalLookback()))
require.Error(t, err)
var validThrough basics.Round
@@ -313,7 +348,8 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, ao *onlineAccounts, base
require.Equal(t, basics.Round(0), validThrough)
if base > 0 && base >= basics.Round(ao.maxBalLookback()) {
- _, err := ao.onlineTotals(base - basics.Round(ao.maxBalLookback()))
+ rnd := base - basics.Round(ao.maxBalLookback())
+ _, err := ao.onlineCirculation(rnd, base)
require.Error(t, err)
_, validThrough, err = au.LookupWithoutRewards(base-1, ledgertesting.RandomAddress())
@@ -376,7 +412,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, ao *onlineAccounts, base
bll := accts[rnd]
require.Equal(t, all, bll)
- totals, err := ao.onlineTotals(rnd)
+ totals, err := ao.onlineCirculation(rnd, rnd+basics.Round(ao.maxBalLookback()))
require.NoError(t, err)
require.Equal(t, totals.Raw, totalOnline)
@@ -547,7 +583,7 @@ func testAcctUpdates(t *testing.T, conf config.Local) {
delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
@@ -667,7 +703,7 @@ func BenchmarkBalancesChanges(b *testing.B) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
@@ -854,7 +890,7 @@ func testAcctUpdatesUpdatesCorrectness(t *testing.T, cfg config.Local) {
for addr, ad := range updates {
delta.Accts.Upsert(addr, ad)
}
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
ml.trackers.committedUpTo(i)
}
lastRound := i - 1
@@ -1735,8 +1771,7 @@ func TestAcctUpdatesCachesInitialization(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
- ml.addMockBlock(blockEntry{block: blk}, delta)
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
ml.trackers.committedUpTo(basics.Round(i))
ml.trackers.waitAccountsWriting()
accts = append(accts, newAccts)
@@ -1825,8 +1860,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
- ml.addMockBlock(blockEntry{block: blk}, delta)
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
@@ -1863,8 +1897,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommits(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
- ml.addMockBlock(blockEntry{block: blk}, delta)
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
@@ -1932,8 +1965,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundary(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
- ml.addMockBlock(blockEntry{block: blk}, delta)
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
@@ -1969,8 +2001,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundary(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
- ml.addMockBlock(blockEntry{block: blk}, delta)
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
@@ -2007,8 +2038,7 @@ func TestAcctUpdatesSplittingConsensusVersionCommitsBoundary(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
- ml.addMockBlock(blockEntry{block: blk}, delta)
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
}
@@ -2137,7 +2167,7 @@ func TestAcctUpdatesResources(t *testing.T) {
delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
delta.Totals = newTotals
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
	// commit changes synchronously
_, maxLookback := au.committedUpTo(i)
@@ -2320,7 +2350,7 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates,
delta.Accts.MergeAccounts(updates)
delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
delta.Totals = accumulateTotals(t, testProtocolVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
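
Throughout the hunks above, the two-step addMockBlock + trackers.newBlock sequence collapses into a single addBlock call. A hypothetical shape for such a helper, sketching why the consolidation matters (one method can hold the mutex while the mock's state is updated, then notify trackers in a fixed order):

    // hypothetical consolidated helper; field names follow the test code above
    func (ml *mockLedgerForTracker) addBlock(be blockEntry, delta ledgercore.StateDelta) {
        ml.mu.Lock()
        ml.blocks = append(ml.blocks, be)
        ml.deltas = append(ml.deltas, delta)
        ml.mu.Unlock()
        // notify trackers outside the lock, since newBlock may call back into the ledger
        ml.trackers.newBlock(be.block, delta)
    }
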
diff --git a/ledger/applications_test.go b/ledger/applications_test.go
index 436791b66..d89f03a67 100644
--- a/ledger/applications_test.go
+++ b/ledger/applications_test.go
@@ -203,11 +203,10 @@ return`
Header: txHeader,
ApplicationCallTxnFields: appCreateFields,
}
- err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{ApplicationID: 1})
+	appIdx := basics.AppIndex(1001) // first txn => idx = 1001
+ err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{ApplicationID: appIdx})
a.NoError(err)
- appIdx := basics.AppIndex(1) // first tnx => idx = 1
-
	// opt-in, do not write
txHeader.Sender = userOptin
appCallFields := transactions.ApplicationCallTxnFields{
@@ -430,11 +429,10 @@ return`
Header: txHeader,
ApplicationCallTxnFields: appCreateFields,
}
- err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{ApplicationID: 1})
+ appIdx := basics.AppIndex(1001) // first txn => idx = 1001 since AppForbidLowResources sets tx counter to 1000
+ err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{ApplicationID: appIdx})
a.NoError(err)
- appIdx := basics.AppIndex(1) // first tnx => idx = 1
-
// opt-in, write to local
txHeader.Sender = userLocal
appCallFields := transactions.ApplicationCallTxnFields{
@@ -673,11 +671,10 @@ return`
Header: txHeader,
ApplicationCallTxnFields: appCreateFields,
}
- err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{ApplicationID: 1})
+	appIdx := basics.AppIndex(1001) // first txn => idx = 1001
+ err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{ApplicationID: appIdx})
a.NoError(err)
- appIdx := basics.AppIndex(1) // first tnx => idx = 1
-
// opt-in, write to local
txHeader.Sender = userLocal
appCallFields := transactions.ApplicationCallTxnFields{
@@ -827,11 +824,10 @@ return`
Header: txHeader,
ApplicationCallTxnFields: appCreateFields,
}
- err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{ApplicationID: 1})
+	appIdx := basics.AppIndex(1001) // first txn => idx = 1001
+ err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{ApplicationID: appIdx})
a.NoError(err)
- appIdx := basics.AppIndex(1) // first tnx => idx = 1
-
	// destroy the app
txHeader.Sender = creator
appCallFields := transactions.ApplicationCallTxnFields{
@@ -1150,7 +1146,7 @@ int 1
}
// create application
- appIdx := basics.AppIndex(1) // first tnx => idx = 1
+	appIdx := basics.AppIndex(1001) // first txn => idx = 1001
approvalProgram := program
clearStateProgram := []byte("\x02") // empty
@@ -1331,8 +1327,8 @@ return
a.Greater(len(ops.Program), 1)
program := ops.Program
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 1000000)
+ proto := config.Consensus[protocol.ConsensusFuture]
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusFuture, 1000000)
creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4")
a.NoError(err)
@@ -1357,7 +1353,7 @@ return
GenesisHash: genesisInitState.GenesisHash,
}
- appIdx := basics.AppIndex(2) // second tnx => idx = 2
+	appIdx := basics.AppIndex(1002) // second txn => idx = 1002
// fund app account
fundingPayment := transactions.Transaction{
@@ -1386,7 +1382,7 @@ return
Header: txHeader,
ApplicationCallTxnFields: appCreateFields,
}
- err = l1.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{ApplicationID: 2})
+ err = l1.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{ApplicationID: appIdx})
a.NoError(err)
// few empty blocks to reset deltas and flush
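
The 1 to 1001 renumbering in this file follows directly from the comment above: with AppForbidLowResources, the test ledger's transaction counter is seeded at 1000, and a creation transaction assigns the post-increment counter value as the new app's ID. A small worked example under that assumption:

    // assumed counter behavior under AppForbidLowResources
    const initialTxnCounter = 1000
    firstAppIdx := initialTxnCounter + 1  // 1001: created by the first txn
    secondAppIdx := initialTxnCounter + 2 // 1002: created by the second txn
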
diff --git a/ledger/apply/application.go b/ledger/apply/application.go
index 3d03349eb..3522b6080 100644
--- a/ledger/apply/application.go
+++ b/ledger/apply/application.go
@@ -379,7 +379,7 @@ func ApplicationCall(ac transactions.ApplicationCallTxnFields, header transactio
// Ensure that the only operation we can do is ClearState if the application
// does not exist
if !exists && ac.OnCompletion != transactions.ClearStateOC {
- return fmt.Errorf("only clearing out is supported for applications that do not exist")
+ return fmt.Errorf("only ClearState is supported for an application (%d) that does not exist", appIdx)
}
// If this txn is going to set new programs (either for creation or
@@ -413,8 +413,8 @@ func ApplicationCall(ac transactions.ApplicationCallTxnFields, header transactio
if exists {
pass, evalDelta, err := balances.StatefulEval(gi, evalParams, appIdx, params.ClearStateProgram)
if err != nil {
- // Fail on non-logic eval errors and ignore LogicEvalError errors
- if _, ok := err.(ledgercore.LogicEvalError); !ok {
+ // ClearStateProgram evaluation can't make the txn fail.
+ if _, ok := err.(logic.EvalError); !ok {
return err
}
}
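
The rewritten ClearState handling narrows which failures are swallowed: a program-evaluation failure (logic.EvalError) must not block the opt-out, while any other error, such as a ledger I/O failure, still aborts the transaction. A minimal sketch of that classification pattern, with a placeholder EvalError type standing in for logic.EvalError:

    // illustrative pattern only; EvalError stands in for logic.EvalError
    type EvalError struct{ Err error }

    func (e EvalError) Error() string { return e.Err.Error() }

    func applyClearState(eval func() error) error {
        if err := eval(); err != nil {
            if _, ok := err.(EvalError); ok {
                return nil // a failing ClearState program cannot veto the opt-out
            }
            return err // real ledger errors still propagate
        }
        return nil
    }
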
diff --git a/ledger/apply/application_test.go b/ledger/apply/application_test.go
index bc8eb74ae..37c132c13 100644
--- a/ledger/apply/application_test.go
+++ b/ledger/apply/application_test.go
@@ -546,7 +546,7 @@ func TestAppCallApplyCreate(t *testing.T) {
// so it will think the app doesn't exist
err = ApplicationCall(ac, h, b, ad, 0, &ep, txnCounter)
a.Error(err)
- a.Contains(err.Error(), "applications that do not exist")
+ a.Contains(err.Error(), "only ClearState is supported")
a.Equal(1, b.put)
a.Equal(1, b.putAppParams)
@@ -895,7 +895,7 @@ func TestAppCallClearState(t *testing.T) {
// one to opt out, one deallocate, no error from ApplicationCall
b.pass = true
b.delta = transactions.EvalDelta{GlobalDelta: nil}
- b.err = ledgercore.LogicEvalError{Err: fmt.Errorf("test error")}
+ b.err = logic.EvalError{Err: fmt.Errorf("test error")}
err = ApplicationCall(ac, h, b, ad, 0, &ep, txnCounter)
a.NoError(err)
a.Equal(1, b.put)
diff --git a/ledger/apply/apply.go b/ledger/apply/apply.go
index b694d9ed5..2be394b8f 100644
--- a/ledger/apply/apply.go
+++ b/ledger/apply/apply.go
@@ -30,6 +30,8 @@ type StateProofsApplier interface {
BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error)
GetStateProofNextRound() basics.Round
SetStateProofNextRound(rnd basics.Round)
+ GetStateProofVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error)
+ ConsensusParams() config.ConsensusParams
}
// Balances allow to move MicroAlgos from one address to another and to update balance records, or to access and modify individual balance records
diff --git a/ledger/apply/stateproof.go b/ledger/apply/stateproof.go
index dba21a724..2901f4e85 100644
--- a/ledger/apply/stateproof.go
+++ b/ledger/apply/stateproof.go
@@ -23,6 +23,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/stateproof/verify"
)
@@ -41,31 +42,51 @@ func StateProof(tx transactions.StateProofTxnFields, atRound basics.Round, sp St
}
lastRoundInInterval := basics.Round(tx.Message.LastAttestedRound)
- lastRoundHdr, err := sp.BlockHdr(lastRoundInInterval)
- if err != nil {
- return err
- }
-
nextStateProofRnd := sp.GetStateProofNextRound()
if nextStateProofRnd == 0 || nextStateProofRnd != lastRoundInInterval {
return fmt.Errorf("applyStateProof: %w - expecting state proof for %d, but new state proof is for %d",
ErrExpectedDifferentStateProofRound, nextStateProofRnd, lastRoundInInterval)
}
- proto := config.Consensus[lastRoundHdr.CurrentProtocol]
if validate {
- votersRnd := lastRoundInInterval.SubSaturate(basics.Round(proto.StateProofInterval))
- votersHdr, err := sp.BlockHdr(votersRnd)
+ var verificationContext *ledgercore.StateProofVerificationContext
+ var err error
+ if sp.ConsensusParams().StateProofUseTrackerVerification {
+ verificationContext, err = sp.GetStateProofVerificationContext(lastRoundInInterval)
+ } else {
+ verificationContext, err = gatherVerificationContextUsingBlockHeaders(sp, lastRoundInInterval)
+ }
if err != nil {
return err
}
- err = verify.ValidateStateProof(&lastRoundHdr, &tx.StateProof, &votersHdr, atRound, &tx.Message)
- if err != nil {
+ if err = verify.ValidateStateProof(verificationContext, &tx.StateProof, atRound, &tx.Message); err != nil {
return err
}
}
- sp.SetStateProofNextRound(lastRoundInInterval + basics.Round(proto.StateProofInterval))
+ // IMPORTANT: this line does not support changing the StateProofInterval consensus param;
+ // Ideally the protocol version should be taken from the votersHeader (or even the lastRoundInInterval header).
+ // However, when replaying the past 320 blocks we might not be able to fetch this header (only X+320+1000 past headers are available).
+ // So for now we will use the current protocol version parameter, and when support for changing StateProofInterval arises
+ // we shall revisit this decision.
+ sp.SetStateProofNextRound(lastRoundInInterval + basics.Round(sp.ConsensusParams().StateProofInterval))
return nil
}
+
+func gatherVerificationContextUsingBlockHeaders(sp StateProofsApplier, lastRoundInInterval basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ lastRoundHdr, err := sp.BlockHdr(lastRoundInInterval)
+ if err != nil {
+ return nil, err
+ }
+ proto := config.Consensus[lastRoundHdr.CurrentProtocol]
+ votersRnd := lastRoundInInterval.SubSaturate(basics.Round(proto.StateProofInterval))
+ votersHdr, err := sp.BlockHdr(votersRnd)
+ if err != nil {
+ return nil, err
+ }
+
+ verificationContext := ledgercore.MakeStateProofVerificationContext(&votersHdr, lastRoundInInterval)
+
+ return verificationContext, nil
+}
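
Once validation passes, SetStateProofNextRound advances the expected round by one StateProofInterval taken from the current consensus params (see the IMPORTANT caveat above about changing that param). A worked example of the bump, assuming the current 256-round interval:

    // mirrors the bump performed above; the interval is assumed to come from
    // sp.ConsensusParams().StateProofInterval
    func nextStateProofRound(lastAttested, interval uint64) uint64 {
        return lastAttested + interval // e.g. 512 + 256 = 768
    }
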
diff --git a/ledger/apply/stateproof_test.go b/ledger/apply/stateproof_test.go
new file mode 100644
index 000000000..155a4eef5
--- /dev/null
+++ b/ledger/apply/stateproof_test.go
@@ -0,0 +1,275 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package apply
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+var ErrVerificationContextNotFound = errors.New("requested state proof verification data not found")
+
+type stateProofApplierMock struct {
+ spNext basics.Round
+ blocks map[basics.Round]bookkeeping.BlockHeader
+ blockErr map[basics.Round]error
+ stateProofVerification map[basics.Round]*ledgercore.StateProofVerificationContext
+ version protocol.ConsensusVersion
+}
+
+func (s *stateProofApplierMock) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) {
+ err, hit := s.blockErr[rnd]
+ if hit {
+ return bookkeeping.BlockHeader{}, err
+ }
+ hdr := s.blocks[rnd] // default struct is fine if nothing found
+ return hdr, nil
+}
+
+func (s *stateProofApplierMock) GetStateProofNextRound() basics.Round {
+ return s.spNext
+}
+
+func (s *stateProofApplierMock) SetStateProofNextRound(rnd basics.Round) {
+ s.spNext = rnd
+}
+
+func (s *stateProofApplierMock) GetStateProofVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ element, exists := s.stateProofVerification[stateProofLastAttestedRound]
+ if !exists {
+ return nil, ErrVerificationContextNotFound
+ }
+ return element, nil
+}
+
+func (s *stateProofApplierMock) ConsensusParams() config.ConsensusParams {
+ return config.Consensus[s.version]
+}
+
+func TestApplyStateProofV34(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var spType protocol.StateProofType
+ var stateProof stateproof.StateProof
+ var atRound basics.Round
+ var validate bool
+ msg := stateproofmsg.Message{}
+
+ const version = protocol.ConsensusV34
+
+ blocks := make(map[basics.Round]bookkeeping.BlockHeader)
+ blockErr := make(map[basics.Round]error)
+ applier := &stateProofApplierMock{
+ spNext: 0,
+ blocks: blocks,
+ blockErr: blockErr,
+ stateProofVerification: nil,
+ version: version,
+ }
+
+ spType = protocol.StateProofType(1234) // bad stateproof type
+ stateProofTx := transactions.StateProofTxnFields{
+ StateProofType: spType,
+ StateProof: stateProof,
+ Message: msg,
+ }
+ err := StateProof(stateProofTx, atRound, applier, validate)
+ a.ErrorIs(err, ErrStateProofTypeNotSupported)
+
+ stateProofTx.StateProofType = protocol.StateProofBasic
+	// stateproof txn doesn't confirm the next state proof round; the expected round is in the past
+ validate = true
+ stateProofTx.Message.LastAttestedRound = uint64(16)
+ applier.SetStateProofNextRound(8)
+ err = StateProof(stateProofTx, atRound, applier, validate)
+ a.ErrorIs(err, ErrExpectedDifferentStateProofRound)
+ applier.SetStateProofNextRound(32)
+
+	// stateproof txn doesn't confirm the next state proof round; the expected round is in the future
+ validate = true
+ stateProofTx.Message.LastAttestedRound = uint64(16)
+ applier.SetStateProofNextRound(32)
+ err = StateProof(stateProofTx, atRound, applier, validate)
+ a.ErrorIs(err, ErrExpectedDifferentStateProofRound)
+
+	// neither the atRound block nor the lastAttested block is available
+ stateProofTx.Message.LastAttestedRound = 32
+ noBlockErr := errors.New("no block")
+ blockErr[atRound] = noBlockErr
+ blockErr[basics.Round(stateProofTx.Message.LastAttestedRound)] = noBlockErr
+ err = StateProof(stateProofTx, atRound, applier, validate)
+ a.ErrorIs(err, noBlockErr)
+ delete(blockErr, atRound)
+
+ atRoundBlock := bookkeeping.BlockHeader{}
+ atRoundBlock.CurrentProtocol = version
+ blocks[atRound] = atRoundBlock
+
+ // no spRnd block
+ noBlockErr = errors.New("no block")
+ blockErr[32] = noBlockErr
+ stateProofTx.Message.LastAttestedRound = 32
+ err = StateProof(stateProofTx, atRound, applier, validate)
+ a.ErrorIs(err, noBlockErr)
+
+ // no votersRnd block
+	// this is a bit of a mess: things don't quite line up with likely usage
+ validate = true
+ var spHdr bookkeeping.BlockHeader
+ spHdr.CurrentProtocol = "TestCowStateProof"
+ spHdr.Round = 1
+ proto := config.Consensus[spHdr.CurrentProtocol]
+ proto.StateProofInterval = 2
+ config.Consensus[spHdr.CurrentProtocol] = proto
+ blocks[spHdr.Round] = spHdr
+
+ spHdr.Round = 15
+ blocks[spHdr.Round] = spHdr
+ stateProofTx.Message.LastAttestedRound = uint64(spHdr.Round)
+ applier.SetStateProofNextRound(15)
+ blockErr[13] = noBlockErr
+ err = StateProof(stateProofTx, atRound, applier, validate)
+ a.Contains(err.Error(), "no block")
+ delete(blockErr, 13)
+
+	// check the happy path - we should fail only on crypto
+ atRound = 800
+ spHdr = bookkeeping.BlockHeader{}
+ spHdr.CurrentProtocol = version
+ blocks[basics.Round(2*config.Consensus[version].StateProofInterval)] = spHdr
+
+ votersHdr := bookkeeping.BlockHeader{}
+ votersHdr.CurrentProtocol = version
+ stateproofTracking := bookkeeping.StateProofTrackingData{
+ StateProofVotersCommitment: []byte{0x1}[:],
+ StateProofOnlineTotalWeight: basics.MicroAlgos{Raw: 5},
+ }
+ votersHdr.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+ votersHdr.StateProofTracking[protocol.StateProofBasic] = stateproofTracking
+
+ blocks[basics.Round(config.Consensus[version].StateProofInterval)] = votersHdr
+ atRoundBlock = bookkeeping.BlockHeader{}
+ atRoundBlock.CurrentProtocol = version
+ blocks[atRound] = atRoundBlock
+
+ stateProofTx.Message.LastAttestedRound = 2 * config.Consensus[version].StateProofInterval
+ stateProofTx.StateProof.SignedWeight = 100
+ applier.SetStateProofNextRound(basics.Round(2 * config.Consensus[version].StateProofInterval))
+
+ err = StateProof(stateProofTx, atRound, applier, validate)
+ a.Contains(err.Error(), "crypto error")
+}
+
+func TestApplyStateProof(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ var spType protocol.StateProofType
+ var stateProof stateproof.StateProof
+ atRound := basics.Round(600)
+ var validate bool
+ msg := stateproofmsg.Message{}
+
+ blocks := make(map[basics.Round]bookkeeping.BlockHeader)
+ blockErr := make(map[basics.Round]error)
+ stateProofVerification := make(map[basics.Round]*ledgercore.StateProofVerificationContext)
+ applier := &stateProofApplierMock{
+ spNext: 0,
+ blocks: blocks,
+ blockErr: blockErr,
+ stateProofVerification: stateProofVerification,
+ version: protocol.ConsensusCurrentVersion,
+ }
+
+ spType = protocol.StateProofType(1234) // bad stateproof type
+ stateProofTx := transactions.StateProofTxnFields{
+ StateProofType: spType,
+ StateProof: stateProof,
+ Message: msg,
+ }
+ err := StateProof(stateProofTx, atRound, applier, validate)
+ a.ErrorIs(err, ErrStateProofTypeNotSupported)
+
+ stateProofTx.StateProofType = protocol.StateProofBasic
+	// stateproof txn doesn't confirm the next state proof round; the expected round is in the past
+ validate = true
+ stateProofTx.Message.LastAttestedRound = uint64(16)
+ applier.SetStateProofNextRound(8)
+ err = StateProof(stateProofTx, atRound, applier, validate)
+ a.ErrorIs(err, ErrExpectedDifferentStateProofRound)
+ applier.SetStateProofNextRound(32)
+
+	// stateproof txn doesn't confirm the next state proof round; the expected round is in the future
+ validate = true
+ stateProofTx.Message.LastAttestedRound = uint64(16)
+ applier.SetStateProofNextRound(32)
+ err = StateProof(stateProofTx, atRound, applier, validate)
+ a.ErrorIs(err, ErrExpectedDifferentStateProofRound)
+
+ atRoundBlock := bookkeeping.BlockHeader{}
+ atRoundBlock.CurrentProtocol = protocol.ConsensusCurrentVersion
+ blocks[atRound] = atRoundBlock
+
+ validate = true
+	// no verification context for round 32
+ stateProofTx.Message.LastAttestedRound = 32
+ err = StateProof(stateProofTx, atRound, applier, validate)
+ a.ErrorIs(err, ErrVerificationContextNotFound)
+ delete(blockErr, atRound)
+
+	// the behavior has changed: we no longer require the voters block header to verify the transaction.
+	// still, this test should ensure the error returned is the expected one and not "no block"
+ noBlockErr := errors.New("no block")
+
+	// remove the blocks from the ledger, so the call can only succeed if apply.StateProof uses the tracker
+ applier.SetStateProofNextRound(512)
+ blockErr[512] = noBlockErr
+ blockErr[256] = noBlockErr
+ stateProofTx.Message.LastAttestedRound = 512
+ stateProofTx.StateProof.SignedWeight = 100
+ stateProofVerification[basics.Round(stateProofTx.Message.LastAttestedRound)] = &ledgercore.StateProofVerificationContext{
+ LastAttestedRound: basics.Round(stateProofTx.Message.LastAttestedRound),
+ VotersCommitment: []byte{0x1}[:],
+ OnlineTotalWeight: basics.MicroAlgos{Raw: 5},
+ Version: protocol.ConsensusCurrentVersion,
+ }
+
+ // crypto verification should fail since it is not a valid stateproof
+ err = StateProof(stateProofTx, atRound, applier, validate)
+ a.Error(err)
+ a.Contains(err.Error(), "crypto error")
+
+ a.Equal(basics.Round(512), applier.GetStateProofNextRound())
+	// the transaction should be applied without state proof validation (no context, block header, or valid state proof needed, as this represents a node catching up)
+ err = StateProof(stateProofTx, atRound, applier, false)
+ a.NoError(err)
+ // make sure that the StateProofNext was updated correctly after applying
+ a.Equal(basics.Round(512+config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval), applier.GetStateProofNextRound())
+}
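
The ErrorIs assertions in these tests depend on Go error wrapping: StateProof wraps its sentinel errors with %w, so require.ErrorIs can match them through the added context. A standalone illustration of the pattern:

    package main

    import (
        "errors"
        "fmt"
    )

    var ErrExpectedDifferentRound = errors.New("expecting a different state proof round")

    func check(got, want uint64) error {
        if got != want {
            // %w keeps the sentinel reachable for errors.Is
            return fmt.Errorf("applyStateProof: %w - expecting %d, got %d", ErrExpectedDifferentRound, want, got)
        }
        return nil
    }

    func main() {
        err := check(16, 32)
        fmt.Println(errors.Is(err, ErrExpectedDifferentRound)) // true
    }
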
diff --git a/ledger/apptxn_test.go b/ledger/apptxn_test.go
index fa840b40a..a2d7dba11 100644
--- a/ledger/apptxn_test.go
+++ b/ledger/apptxn_test.go
@@ -58,8 +58,6 @@ func TestPayAction(t *testing.T) {
itxn_submit
`))
- require.Equal(t, ai, basics.AppIndex(1))
-
payout1 := txntest.Txn{
Type: "appl",
Sender: addrs[1],
@@ -138,12 +136,11 @@ func TestPayAction(t *testing.T) {
for i := 1; i < 10; i++ {
dl.fullBlock()
}
- vb = dl.fullBlock(payout2.Noted("2"))
+ tib := dl.txn(payout2.Noted("2"))
afterpay := micros(dl.t, dl.validator, ai.Address())
- payInBlock = vb.Block().Payset[0]
- inners = payInBlock.ApplyData.EvalDelta.InnerTxns
+ inners = tib.ApplyData.EvalDelta.InnerTxns
require.Len(t, inners, 1)
appreward := inners[0].SenderRewards.Raw
@@ -159,26 +156,24 @@ func TestAxferAction(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- cfg := config.GetDefaultLocal()
- l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusFuture, cfg)
- defer l.Close()
+ // Inner txns start in v30
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
- asa := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 1000000,
- Decimals: 3,
- UnitName: "oz",
- AssetName: "Gold",
- URL: "https://gold.rush/",
- },
- }
+ asa := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 1000000,
+ Decimals: 3,
+ UnitName: "oz",
+ AssetName: "Gold",
+ URL: "https://gold.rush/",
+ },
+ }
- app := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ source := main(`
itxn_begin
int axfer
itxn_field TypeEnum
@@ -207,145 +202,108 @@ skipamount:
txn Accounts 1
itxn_field AssetReceiver
submit: itxn_submit
-`),
- }
-
- eval := nextBlock(t, l)
- txns(t, l, eval, &asa, &app)
- vb := endBlock(t, l, eval)
+`)
- asaIndex := basics.AssetIndex(1)
- require.Equal(t, asaIndex, vb.Block().Payset[0].ApplyData.ConfigAsset)
- appIndex := basics.AppIndex(2)
- require.Equal(t, appIndex, vb.Block().Payset[1].ApplyData.ApplicationID)
-
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: appIndex.Address(),
- Amount: 300000, // account min balance, optin min balance, plus fees
+ asaID := dl.txn(&asa).ApplyData.ConfigAsset
+ // account min balance, optin min balance, plus fees
// stay under 1M, to avoid rewards complications
- }
+ appID := dl.fundedApp(addrs[0], 300_000, source)
- eval = nextBlock(t, l)
- txn(t, l, eval, &fund)
- endBlock(t, l, eval)
+ fundgold := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[0],
+ XferAsset: asaID,
+ AssetReceiver: appID.Address(),
+ AssetAmount: 20000,
+ }
- fundgold := txntest.Txn{
- Type: "axfer",
- Sender: addrs[0],
- XferAsset: asaIndex,
- AssetReceiver: appIndex.Address(),
- AssetAmount: 20000,
- }
+ // Fail, because app account is not opted in.
+ dl.txn(&fundgold, fmt.Sprintf("asset %d missing", asaID))
- // Fail, because app account is not opted in.
- eval = nextBlock(t, l)
- txn(t, l, eval, &fundgold, fmt.Sprintf("asset %d missing", asaIndex))
- endBlock(t, l, eval)
+ amount, in := holding(t, dl.generator, appID.Address(), asaID)
+ require.False(t, in)
+ require.Zero(t, amount)
- amount, in := holding(t, l, appIndex.Address(), asaIndex)
- require.False(t, in)
- require.Equal(t, amount, uint64(0))
+ // Tell the app to opt itself in.
+ optin := txntest.Txn{
+ Type: "appl",
+ ApplicationID: appID,
+ Sender: addrs[0],
+ ApplicationArgs: [][]byte{[]byte("optin")},
+ ForeignAssets: []basics.AssetIndex{asaID},
+ }
+ dl.txn(&optin)
- optin := txntest.Txn{
- Type: "appl",
- ApplicationID: appIndex,
- Sender: addrs[0],
- ApplicationArgs: [][]byte{[]byte("optin")},
- ForeignAssets: []basics.AssetIndex{asaIndex},
- }
+ amount, in = holding(t, dl.generator, appID.Address(), asaID)
+ require.True(t, in)
+ require.Zero(t, amount)
- // Tell the app to opt itself in.
- eval = nextBlock(t, l)
- txn(t, l, eval, &optin)
- endBlock(t, l, eval)
+ // Now, succeed, because opted in.
+ dl.txn(&fundgold)
- amount, in = holding(t, l, appIndex.Address(), asaIndex)
- require.True(t, in)
- require.Equal(t, amount, uint64(0))
+ amount, in = holding(t, dl.generator, appID.Address(), asaID)
+ require.True(t, in)
+ require.Equal(t, uint64(20000), amount)
- // Now, succeed, because opted in.
- eval = nextBlock(t, l)
- txn(t, l, eval, &fundgold)
- endBlock(t, l, eval)
-
- amount, in = holding(t, l, appIndex.Address(), asaIndex)
- require.True(t, in)
- require.Equal(t, amount, uint64(20000))
+ withdraw := txntest.Txn{
+ Type: "appl",
+ ApplicationID: appID,
+ Sender: addrs[0],
+ ApplicationArgs: [][]byte{[]byte("withdraw")},
+ ForeignAssets: []basics.AssetIndex{asaID},
+ Accounts: []basics.Address{addrs[0]},
+ }
+ dl.txn(&withdraw)
- withdraw := txntest.Txn{
- Type: "appl",
- ApplicationID: appIndex,
- Sender: addrs[0],
- ApplicationArgs: [][]byte{[]byte("withdraw")},
- ForeignAssets: []basics.AssetIndex{asaIndex},
- Accounts: []basics.Address{addrs[0]},
- }
- eval = nextBlock(t, l)
- txn(t, l, eval, &withdraw)
- endBlock(t, l, eval)
+ amount, in = holding(t, dl.generator, appID.Address(), asaID)
+ require.True(t, in)
+ require.Equal(t, uint64(10000), amount)
- amount, in = holding(t, l, appIndex.Address(), asaIndex)
- require.True(t, in)
- require.Equal(t, amount, uint64(10000))
+ dl.txn(withdraw.Noted("2"))
- eval = nextBlock(t, l)
- txn(t, l, eval, withdraw.Noted("2"))
- endBlock(t, l, eval)
+ amount, in = holding(t, dl.generator, appID.Address(), asaID)
+ require.True(t, in) // Zero left, but still opted in
+ require.Zero(t, amount)
- amount, in = holding(t, l, appIndex.Address(), asaIndex)
- require.True(t, in) // Zero left, but still opted in
- require.Equal(t, amount, uint64(0))
+ dl.txn(withdraw.Noted("3"), "underflow on subtracting")
- eval = nextBlock(t, l)
- txn(t, l, eval, withdraw.Noted("3"), "underflow on subtracting")
- endBlock(t, l, eval)
+ amount, in = holding(t, dl.generator, appID.Address(), asaID)
+ require.True(t, in) // Zero left, but still opted in
+ require.Zero(t, amount)
- amount, in = holding(t, l, appIndex.Address(), asaIndex)
- require.True(t, in) // Zero left, but still opted in
- require.Equal(t, amount, uint64(0))
+ close := txntest.Txn{
+ Type: "appl",
+ ApplicationID: appID,
+ Sender: addrs[0],
+ ApplicationArgs: [][]byte{[]byte("close")},
+ ForeignAssets: []basics.AssetIndex{asaID},
+ Accounts: []basics.Address{addrs[0]},
+ }
- close := txntest.Txn{
- Type: "appl",
- ApplicationID: appIndex,
- Sender: addrs[0],
- ApplicationArgs: [][]byte{[]byte("close")},
- ForeignAssets: []basics.AssetIndex{asaIndex},
- Accounts: []basics.Address{addrs[0]},
- }
+ dl.txn(&close)
- eval = nextBlock(t, l)
- txn(t, l, eval, &close)
- endBlock(t, l, eval)
+ amount, in = holding(t, dl.generator, appID.Address(), asaID)
+ require.False(t, in) // Zero left, not opted in
+ require.Zero(t, amount)
- amount, in = holding(t, l, appIndex.Address(), asaIndex)
- require.False(t, in) // Zero left, not opted in
- require.Equal(t, amount, uint64(0))
+ // Now, fail again, opted out
+ dl.txn(fundgold.Noted("2"), fmt.Sprintf("asset %d missing", asaID))
- // Now, fail again, opted out
- eval = nextBlock(t, l)
- txn(t, l, eval, fundgold.Noted("2"), fmt.Sprintf("asset %d missing", asaIndex))
- endBlock(t, l, eval)
+ // Do it all again, so we can test closeTo when we have a non-zero balance
+ // Tell the app to opt itself in.
+ dl.txns(optin.Noted("a"), fundgold.Noted("a"))
- // Do it all again, so we can test closeTo when we have a non-zero balance
- // Tell the app to opt itself in.
- eval = nextBlock(t, l)
- txns(t, l, eval, optin.Noted("a"), fundgold.Noted("a"))
- endBlock(t, l, eval)
+ amount, _ = holding(t, dl.generator, appID.Address(), asaID)
+ require.Equal(t, uint64(20000), amount)
+ left, _ := holding(t, dl.generator, addrs[0], asaID)
- amount, _ = holding(t, l, appIndex.Address(), asaIndex)
- require.Equal(t, uint64(20000), amount)
- left, _ := holding(t, l, addrs[0], asaIndex)
+ dl.txn(close.Noted("a"))
- eval = nextBlock(t, l)
- txn(t, l, eval, close.Noted("a"))
- endBlock(t, l, eval)
-
- amount, _ = holding(t, l, appIndex.Address(), asaIndex)
- require.Equal(t, uint64(0), amount)
- back, _ := holding(t, l, addrs[0], asaIndex)
- require.Equal(t, uint64(20000), back-left)
+ amount, _ = holding(t, dl.generator, appID.Address(), asaID)
+ require.Zero(t, amount)
+ back, _ := holding(t, dl.generator, addrs[0], asaID)
+ require.Equal(t, uint64(20000), back-left)
+ })
}
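
The rewritten test above sets the shape the rest of this file converges on: ledgertesting.TestConsensusRange runs the body once per consensus version from the given floor (a 0 ceiling meaning through vFuture), and the DoubleLedger replays each block on a second ledger to cross-check evaluation. A hedged skeleton of the harness usage; someTxn, badTxn, and source are placeholders, not names from the repository:

    // skeleton only; mirrors the harness calls used in the tests above
    ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
        dl := NewDoubleLedger(t, genBalances, cv, cfg)
        defer dl.Close()

        appID := dl.fundedApp(addrs[0], 1_000_000, source) // create and fund in one step
        dl.txn(&someTxn)                // expect success
        dl.txn(&badTxn, "unauthorized") // expect failure matching the message
    })
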
// TestClawbackAction ensures an app address can act as clawback address.
@@ -354,84 +312,66 @@ func TestClawbackAction(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- asaIndex := basics.AssetIndex(1)
- appIndex := basics.AppIndex(2)
-
- asa := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 1000000,
- Decimals: 3,
- UnitName: "oz",
- AssetName: "Gold",
- URL: "https://gold.rush/",
- Clawback: appIndex.Address(),
- },
- }
+ // 31 allowed inner appl.
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
- app := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ app := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
itxn_begin
-
- int axfer
- itxn_field TypeEnum
-
- txn Assets 0
- itxn_field XferAsset
-
- txn Accounts 1
- itxn_field AssetSender
-
- txn Accounts 2
- itxn_field AssetReceiver
-
- int 1000
- itxn_field AssetAmount
-
+ int axfer; itxn_field TypeEnum
+ txn Assets 0; itxn_field XferAsset
+ txn Accounts 1; itxn_field AssetSender
+ txn Accounts 2; itxn_field AssetReceiver
+ int 1000; itxn_field AssetAmount
itxn_submit
`),
- }
+ }
+ appID := dl.txn(&app).ApplyData.ApplicationID
- optin := txntest.Txn{
- Type: "axfer",
- Sender: addrs[1],
- AssetReceiver: addrs[1],
- XferAsset: asaIndex,
- }
- eval := nextBlock(t, l)
- txns(t, l, eval, &asa, &app, &optin)
- vb := endBlock(t, l, eval)
+ asa := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 1005,
+ Clawback: appID.Address(),
+ },
+ }
+ asaID := dl.txn(&asa).ApplyData.ConfigAsset
- require.Equal(t, asaIndex, vb.Block().Payset[0].ApplyData.ConfigAsset)
- require.Equal(t, appIndex, vb.Block().Payset[1].ApplyData.ApplicationID)
+ optin := txntest.Txn{
+ Type: "axfer",
+ Sender: addrs[1],
+ AssetReceiver: addrs[1],
+ XferAsset: asaID,
+ }
+ dl.txn(&optin)
- bystander := addrs[2] // Has no authority of its own
- overpay := txntest.Txn{
- Type: "pay",
- Sender: bystander,
- Receiver: bystander,
- Fee: 2000, // Overpay fee so that app account can be unfunded
- }
- clawmove := txntest.Txn{
- Type: "appl",
- Sender: bystander,
- ApplicationID: appIndex,
- ForeignAssets: []basics.AssetIndex{asaIndex},
- Accounts: []basics.Address{addrs[0], addrs[1]},
- }
- eval = nextBlock(t, l)
- err := txgroup(t, l, eval, &overpay, &clawmove)
- require.NoError(t, err)
- endBlock(t, l, eval)
+ bystander := addrs[2] // Has no authority of its own
+ overpay := txntest.Txn{
+ Type: "pay",
+ Sender: bystander,
+ Receiver: bystander,
+ Fee: 2000, // Overpay fee so that app account can be unfunded
+ }
+ clawmove := txntest.Txn{
+ Type: "appl",
+ Sender: bystander,
+ ApplicationID: appID,
+ ForeignAssets: []basics.AssetIndex{asaID},
+ Accounts: []basics.Address{addrs[0], addrs[1]},
+ }
+ dl.txgroup("", &overpay, &clawmove)
- amount, _ := holding(t, l, addrs[1], asaIndex)
- require.Equal(t, amount, uint64(1000))
+ amount, _ := holding(t, dl.generator, addrs[1], asaID)
+ require.EqualValues(t, 1000, amount)
+ amount, _ = holding(t, dl.generator, addrs[0], asaID)
+ require.EqualValues(t, 5, amount)
+ })
}
// TestRekeyAction ensures an app can transact for a rekeyed account
@@ -440,102 +380,89 @@ func TestRekeyAction(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- appIndex := basics.AppIndex(1)
- ezpayer := txntest.Txn{
- Type: "appl",
- Sender: addrs[5],
- ApprovalProgram: main(`
+ // 30 allowed inner txns.
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ ezpayer := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[5],
+ ApprovalProgram: main(`
itxn_begin
- int pay
- itxn_field TypeEnum
- int 5000
- itxn_field Amount
- txn Accounts 1
- itxn_field Sender
- txn Accounts 2
- itxn_field Receiver
- txn NumAccounts
- int 3
- ==
- bz skipclose
- txn Accounts 3
- itxn_field CloseRemainderTo
+ int pay; itxn_field TypeEnum
+ int 5000; itxn_field Amount
+ txn Accounts 1; itxn_field Sender
+ txn Accounts 2; itxn_field Receiver
+ txn NumAccounts
+ int 3
+ ==
+ bz skipclose
+ txn Accounts 3; itxn_field CloseRemainderTo
skipclose:
itxn_submit
`),
- }
+ }
+ appID := dl.txn(&ezpayer).ApplyData.ApplicationID
- rekey := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: addrs[0],
- RekeyTo: appIndex.Address(),
- }
+ rekey := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: addrs[0],
+ RekeyTo: appID.Address(),
+ }
- eval := nextBlock(t, l)
- txns(t, l, eval, &ezpayer, &rekey)
- endBlock(t, l, eval)
+ dl.txn(&rekey)
- useacct := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appIndex,
- Accounts: []basics.Address{addrs[0], addrs[2]}, // pay 2 from 0 (which was rekeyed)
- }
- eval = nextBlock(t, l)
- txn(t, l, eval, &useacct)
- endBlock(t, l, eval)
+ useacct := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ Accounts: []basics.Address{addrs[0], addrs[2]}, // pay 2 from 0 (which was rekeyed)
+ }
+ dl.txn(&useacct)
- // App was never funded (didn't spend from it's own acct)
- require.Equal(t, uint64(0), micros(t, l, basics.AppIndex(1).Address()))
- // addrs[2] got paid
- require.Equal(t, uint64(5000), micros(t, l, addrs[2])-micros(t, l, addrs[6]))
- // addrs[0] paid 5k + rekey fee + inner txn fee
- require.Equal(t, uint64(7000), micros(t, l, addrs[6])-micros(t, l, addrs[0]))
+		// App was never funded (didn't spend from its own acct)
+ require.Zero(t, micros(t, dl.generator, appID.Address()))
+ // addrs[2] got paid
+ require.Equal(t, uint64(5000), micros(t, dl.generator, addrs[2])-micros(t, dl.generator, addrs[6]))
+ // addrs[0] paid 5k + rekey fee + inner txn fee
+ require.Equal(t, uint64(7000), micros(t, dl.generator, addrs[6])-micros(t, dl.generator, addrs[0]))
- baduse := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appIndex,
- Accounts: []basics.Address{addrs[2], addrs[0]}, // pay 0 from 2
- }
- eval = nextBlock(t, l)
- txn(t, l, eval, &baduse, "unauthorized")
- endBlock(t, l, eval)
+ baduse := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ Accounts: []basics.Address{addrs[2], addrs[0]}, // pay 0 from 2
+ }
+ dl.txn(&baduse, "unauthorized")
- // Now, we close addrs[0], which wipes its rekey status. Reopen
- // it, and make sure the app can't spend.
+ // Now, we close addrs[0], which wipes its rekey status. Reopen
+ // it, and make sure the app can't spend.
- close := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appIndex,
- Accounts: []basics.Address{addrs[0], addrs[2], addrs[3]}, // close to 3
- }
- eval = nextBlock(t, l)
- txn(t, l, eval, &close)
- endBlock(t, l, eval)
+ close := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ Accounts: []basics.Address{addrs[0], addrs[2], addrs[3]}, // close to 3
+ }
+ dl.txn(&close)
- require.Equal(t, uint64(0), micros(t, l, addrs[0]))
+ require.Zero(t, micros(t, dl.generator, addrs[0]))
- payback := txntest.Txn{
- Type: "pay",
- Sender: addrs[3],
- Receiver: addrs[0],
- Amount: 10_000_000,
- }
- eval = nextBlock(t, l)
- txn(t, l, eval, &payback)
- endBlock(t, l, eval)
+ payback := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[3],
+ Receiver: addrs[0],
+ Amount: 10_000_000,
+ }
+ dl.txn(&payback)
- require.Equal(t, uint64(10_000_000), micros(t, l, addrs[0]))
+ require.Equal(t, uint64(10_000_000), micros(t, dl.generator, addrs[0]))
- eval = nextBlock(t, l)
- txn(t, l, eval, useacct.Noted("2"), "unauthorized")
- endBlock(t, l, eval)
+ dl.txn(useacct.Noted("2"), "unauthorized")
+ })
}
// TestRekeyActionCloseAccount ensures closing and reopening a rekeyed account in a single app call
@@ -545,74 +472,58 @@ func TestRekeyActionCloseAccount(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- appIndex := basics.AppIndex(1)
- create := txntest.Txn{
- Type: "appl",
- Sender: addrs[5],
- ApprovalProgram: main(`
- // close account 1
+	// 30 allowed inner txns.
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+		// use addrs[5] for creation, so addrs[0] will be closeable
+ appID := dl.fundedApp(addrs[5], 1_000_000,
+ main(`
+ // pay from, and close, account 1
itxn_begin
- int pay
- itxn_field TypeEnum
- txn Accounts 1
- itxn_field Sender
- txn Accounts 2
- itxn_field CloseRemainderTo
+ int pay; itxn_field TypeEnum
+ txn Accounts 1; itxn_field Sender
+ txn Accounts 2; itxn_field CloseRemainderTo
itxn_submit
// reopen account 1
itxn_begin
- int pay
- itxn_field TypeEnum
- int 5000
- itxn_field Amount
- txn Accounts 1
- itxn_field Receiver
+ int pay; itxn_field TypeEnum
+ int 5000; itxn_field Amount
+ txn Accounts 1; itxn_field Receiver
itxn_submit
+
// send from account 1 again (should fail because closing an account erases rekeying)
itxn_begin
- int pay
- itxn_field TypeEnum
- int 1
- itxn_field Amount
- txn Accounts 1
- itxn_field Sender
- txn Accounts 2
- itxn_field Receiver
+ int pay; itxn_field TypeEnum
+ int 1; itxn_field Amount
+ txn Accounts 1; itxn_field Sender
+ txn Accounts 2; itxn_field Receiver
itxn_submit
-`),
- }
+`))
- rekey := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: addrs[0],
- RekeyTo: appIndex.Address(),
- }
-
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[1],
- Receiver: appIndex.Address(),
- Amount: 1_000_000,
- }
-
- eval := nextBlock(t, l)
- txns(t, l, eval, &create, &rekey, &fund)
- endBlock(t, l, eval)
+		// rekey addrs[0] to the app
+ dl.txn(&txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: addrs[0],
+ RekeyTo: appID.Address(),
+ })
- useacct := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appIndex,
- Accounts: []basics.Address{addrs[0], addrs[2]},
- }
- eval = nextBlock(t, l)
- txn(t, l, eval, &useacct, "unauthorized")
- endBlock(t, l, eval)
+ useacct := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ Accounts: []basics.Address{addrs[0], addrs[2]},
+ }
+ dl.txn(&useacct, "unauthorized")
+		// do it again, to ensure the authorization failure happens in the right
+		// place, by matching on the opcode that comes right before the
+		// itxn_submit we expect to fail (the opcode shows up in the error)
+ dl.txn(&useacct, "itxn_field Receiver")
+ })
}
// TestDuplicatePayAction shows that two pays with the same parameters can be issued as inner transactions
@@ -621,74 +532,52 @@ func TestDuplicatePayAction(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
+ // Inner txns start in v30
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
- appIndex := basics.AppIndex(1)
- create := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ source := main(`
itxn_begin
- int pay
- itxn_field TypeEnum
- int 5000
- itxn_field Amount
- txn Accounts 1
- itxn_field Receiver
+ int pay; itxn_field TypeEnum
+ int 5000; itxn_field Amount
+ txn Accounts 1; itxn_field Receiver
itxn_submit
itxn_begin
- int pay
- itxn_field TypeEnum
- int 5000
- itxn_field Amount
- txn Accounts 1
- itxn_field Receiver
+ int pay; itxn_field TypeEnum
+ int 5000; itxn_field Amount
+ txn Accounts 1; itxn_field Receiver
itxn_submit
-`),
- }
+`)
+ appID := dl.fundedApp(addrs[0], 200_000, source)
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: appIndex.Address(),
- Amount: 200000, // account min balance, plus fees
- }
-
- paytwice := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appIndex,
- Accounts: []basics.Address{addrs[1]}, // pay self
- }
-
- eval := nextBlock(t, l)
- txns(t, l, eval, &create, &fund, &paytwice, create.Noted("in same block"))
- vb := endBlock(t, l, eval)
-
- require.Equal(t, appIndex, vb.Block().Payset[0].ApplyData.ApplicationID)
- require.Equal(t, 4, len(vb.Block().Payset))
- // create=1, fund=2, payTwice=3,4,5
- require.Equal(t, basics.AppIndex(6), vb.Block().Payset[3].ApplyData.ApplicationID)
+ paytwice := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ Accounts: []basics.Address{addrs[1]}, // pay self
+ }
- ad0 := micros(t, l, addrs[0])
- ad1 := micros(t, l, addrs[1])
- app := micros(t, l, appIndex.Address())
+ dl.txn(&paytwice)
+ copyID := dl.fundedApp(addrs[0], 200_000, source)
+		require.Equal(t, appID+5, copyID) // 4 txns in between (fund, outer appl, two inner pays)
- // create(1000) and fund(1000 + 200000), extra create (1000)
- require.Equal(t, 203000, int(genBalances.Balances[addrs[0]].MicroAlgos.Raw-ad0))
- // paid 10000, but 1000 fee on tx
- require.Equal(t, 9000, int(ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw))
- // app still has 188000 (paid out 10000, and paid 2 x fee to do it)
- require.Equal(t, 188000, int(app))
+ ad0 := micros(t, dl.generator, addrs[0])
+ ad1 := micros(t, dl.generator, addrs[1])
+ app := micros(t, dl.generator, appID.Address())
- // Now create another app, and see if it gets the index we expect.
- eval = nextBlock(t, l)
- txns(t, l, eval, create.Noted("again"))
- vb = endBlock(t, l, eval)
+ // create(1000) and fund(1000 + 200000), extra create+fund (1000 + 201000)
+ require.Equal(t, 404000, int(genBalances.Balances[addrs[0]].MicroAlgos.Raw-ad0))
+ // paid 10000, but 1000 fee on tx
+ require.Equal(t, 9000, int(ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw))
+ // app still has 188000 (paid out 10000, and paid 2 x fee to do it)
+ require.Equal(t, 188000, int(app))
- // create=1, fund=2, payTwice=3,4,5, insameblock=6
- require.Equal(t, basics.AppIndex(7), vb.Block().Payset[0].ApplyData.ApplicationID)
+ // Now create another app, and see if it gets the ID we expect (2
+ // higher, because of the intervening fund txn)
+ finalID := dl.fundedApp(addrs[0], 200_000, source)
+ require.Equal(t, copyID+2, finalID)
+ })
}
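
The ID assertions in this test are pure transaction-counter arithmetic: every transaction, outer or inner, increments the counter, and a created app takes the counter value at its creation. Counting the transactions between the two fundedApp calls reproduces both expected offsets, assuming that counter rule:

    // appID:      create #1 (counter lands on appID)
    //   +1        fund for app #1 (second half of fundedApp)
    //   +1        paytwice outer appl
    //   +2        its two inner pays
    //   +1        create #2            => copyID  == appID + 5
    //   +1        fund for app #2
    //   +1        create #3            => finalID == copyID + 2
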
// TestInnerTxnCount ensures that inner transactions increment the TxnCounter
@@ -697,13 +586,14 @@ func TestInnerTxnCount(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- create := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+	// 30 allowed inner txns.
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ appID := dl.fundedApp(addrs[0], 200000, // account min balance, plus fees
+ main(`
itxn_begin
int pay
itxn_field TypeEnum
@@ -712,32 +602,20 @@ func TestInnerTxnCount(t *testing.T) {
txn Accounts 1
itxn_field Receiver
itxn_submit
-`),
- }
-
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: basics.AppIndex(1).Address(),
- Amount: 200000, // account min balance, plus fees
- }
-
- payout1 := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: basics.AppIndex(1),
- Accounts: []basics.Address{addrs[1]}, // pay self
- }
+`))
- eval := nextBlock(t, l)
- txns(t, l, eval, &create, &fund)
- vb := endBlock(t, l, eval)
- require.Equal(t, 2, int(vb.Block().TxnCounter))
+ payout1 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ Accounts: []basics.Address{addrs[1]}, // pay self
+ }
- eval = nextBlock(t, l)
- txns(t, l, eval, &payout1)
- vb = endBlock(t, l, eval)
- require.Equal(t, 4, int(vb.Block().TxnCounter))
+ vb := dl.fullBlock(&payout1)
+ before := vb.Block().TxnCounter
+ vb = dl.fullBlock(payout1.Noted("again"))
+ require.Equal(t, before+2, vb.Block().TxnCounter)
+ })
}
// TestAcfgAction ensures assets can be created and configured in teal
@@ -746,14 +624,14 @@ func TestAcfgAction(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- appIndex := basics.AppIndex(1)
- app := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+	// 30 allowed inner txns.
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ appID := dl.fundedApp(addrs[0], 200_000, // exactly account min balance + one asset
+ main(`
itxn_begin
int acfg
itxn_field TypeEnum
@@ -845,73 +723,59 @@ clawback:
b submit
error: err
submit: itxn_submit
-`),
- }
-
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: appIndex.Address(),
- Amount: 200_000, // exactly account min balance + one asset
- }
-
- eval := nextBlock(t, l)
- txns(t, l, eval, &app, &fund)
- endBlock(t, l, eval)
-
- createAsa := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appIndex,
- ApplicationArgs: [][]byte{[]byte("create")},
- }
+`))
- eval = nextBlock(t, l)
- // Can't create an asset if you have exactly 200,000 and need to pay fee
- txn(t, l, eval, &createAsa, "balance 199000 below min 200000")
- // fund it some more and try again
- txns(t, l, eval, fund.Noted("more!"), &createAsa)
- vb := endBlock(t, l, eval)
-
- asaIndex := vb.Block().Payset[1].EvalDelta.InnerTxns[0].ConfigAsset
- require.Equal(t, basics.AssetIndex(5), asaIndex)
-
- asaParams, err := asaParams(t, l, basics.AssetIndex(5))
- require.NoError(t, err)
+ createAsa := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ ApplicationArgs: [][]byte{[]byte("create")},
+ }
- require.Equal(t, 1_000_000, int(asaParams.Total))
- require.Equal(t, 3, int(asaParams.Decimals))
- require.Equal(t, "oz", asaParams.UnitName)
- require.Equal(t, "Gold", asaParams.AssetName)
- require.Equal(t, "https://gold.rush/", asaParams.URL)
+ // Can't create an asset if you have exactly 200,000 and need to pay fee
+ dl.txn(&createAsa, "balance 199000 below min 200000")
+ // add some more
+ dl.txn(&txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: appID.Address(),
+ Amount: 10_000,
+ })
+ asaID := dl.txn(&createAsa).EvalDelta.InnerTxns[0].ConfigAsset
+ require.NotZero(t, asaID)
- require.Equal(t, appIndex.Address(), asaParams.Manager)
+ asaParams, err := asaParams(t, dl.generator, asaID)
+ require.NoError(t, err)
- for _, a := range []string{"reserve", "freeze", "clawback", "manager"} {
- check := txntest.Txn{
+ require.Equal(t, 1_000_000, int(asaParams.Total))
+ require.Equal(t, 3, int(asaParams.Decimals))
+ require.Equal(t, "oz", asaParams.UnitName)
+ require.Equal(t, "Gold", asaParams.AssetName)
+ require.Equal(t, "https://gold.rush/", asaParams.URL)
+
+ require.Equal(t, appID.Address(), asaParams.Manager)
+
+ for _, a := range []string{"reserve", "freeze", "clawback", "manager"} {
+ check := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ ApplicationArgs: [][]byte{[]byte(a), []byte("junkjunkjunkjunkjunkjunkjunkjunk")},
+ ForeignAssets: []basics.AssetIndex{asaID},
+ }
+ t.Log(a)
+ dl.txn(&check)
+ }
+ // Not the manager anymore so this won't work
+ nodice := txntest.Txn{
Type: "appl",
Sender: addrs[1],
- ApplicationID: appIndex,
- ApplicationArgs: [][]byte{[]byte(a), []byte("junkjunkjunkjunkjunkjunkjunkjunk")},
- ForeignAssets: []basics.AssetIndex{asaIndex},
+ ApplicationID: appID,
+ ApplicationArgs: [][]byte{[]byte("freeze"), []byte("junkjunkjunkjunkjunkjunkjunkjunk")},
+ ForeignAssets: []basics.AssetIndex{asaID},
}
- eval = nextBlock(t, l)
- t.Log(a)
- txn(t, l, eval, &check)
- endBlock(t, l, eval)
- }
- // Not the manager anymore so this won't work
- nodice := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appIndex,
- ApplicationArgs: [][]byte{[]byte("freeze"), []byte("junkjunkjunkjunkjunkjunkjunkjunk")},
- ForeignAssets: []basics.AssetIndex{asaIndex},
- }
- eval = nextBlock(t, l)
- txn(t, l, eval, &nodice, "this transaction should be issued by the manager")
- endBlock(t, l, eval)
-
+ dl.txn(&nodice, "this transaction should be issued by the manager")
+ })
}
// TestAsaDuringInit ensures an ASA can be made while initializing an
@@ -923,67 +787,62 @@ func TestAsaDuringInit(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- appIndex := basics.AppIndex(2)
- prefund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: appIndex.Address(),
- Amount: 300000, // plenty for min balances, fees
- }
+	// 30 allowed inner txns.
+ ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
- app := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: `
+ appID := basics.AppIndex(2)
+ if ver >= 38 { // AppForbidLowResources
+ appID += 1000
+ }
+ prefund := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: appID.Address(),
+ Amount: 300000, // plenty for min balances, fees
+ }
+
+ app := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: `
itxn_begin
- int acfg
- itxn_field TypeEnum
- int 1000000
- itxn_field ConfigAssetTotal
- byte "oz"
- itxn_field ConfigAssetUnitName
- byte "Gold"
- itxn_field ConfigAssetName
+ int acfg; itxn_field TypeEnum
+ int 1000000; itxn_field ConfigAssetTotal
+ byte "oz"; itxn_field ConfigAssetUnitName
+ byte "Gold"; itxn_field ConfigAssetName
itxn_submit
itxn CreatedAssetID
- int 3
+ int ` + strconv.Itoa(int(appID+1)) + `
==
assert
- itxn CreatedApplicationID
- int 0
- ==
- assert
- itxn NumLogs
- int 0
- ==
-`,
- }
-
- eval := nextBlock(t, l)
- txns(t, l, eval, &prefund, &app)
- vb := endBlock(t, l, eval)
+ itxn CreatedApplicationID; int 0; ==; assert
+ itxn NumLogs; int 0; ==`,
+ }
- require.Equal(t, appIndex, vb.Block().Payset[1].ApplicationID)
+ payset := dl.txns(&prefund, &app)
+ require.Equal(t, appID, payset[1].ApplicationID)
- asaIndex := vb.Block().Payset[1].EvalDelta.InnerTxns[0].ConfigAsset
- require.Equal(t, basics.AssetIndex(3), asaIndex)
+ asaID := payset[1].EvalDelta.InnerTxns[0].ConfigAsset
+ require.EqualValues(t, appID+1, asaID)
+ })
}
-func TestRekey(t *testing.T) {
+func TestInnerRekey(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- app := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ // 31 allowed inner rekeys.
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ appID := dl.fundedApp(addrs[0], 1_000_000,
+ main(`
itxn_begin
int pay
itxn_field TypeEnum
@@ -997,31 +856,167 @@ func TestRekey(t *testing.T) {
concat
itxn_field RekeyTo
itxn_submit
-`),
- }
+`))
+ require.NotZero(t, appID)
- eval := nextBlock(t, l)
- txns(t, l, eval, &app)
- vb := endBlock(t, l, eval)
- appIndex := vb.Block().Payset[0].ApplicationID
- require.Equal(t, basics.AppIndex(1), appIndex)
+ rekey := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ }
+ dl.fullBlock(&rekey)
+ dl.txn(rekey.Noted("2"), "unauthorized")
+ })
+}
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: appIndex.Address(),
- Amount: 1_000_000,
- }
- rekey := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appIndex,
- }
- eval = nextBlock(t, l)
- txns(t, l, eval, &fund, &rekey)
- txn(t, l, eval, rekey.Noted("2"), "unauthorized")
- endBlock(t, l, eval)
+// TestInnerAppCreateAndOptin tests a weird way to create an app and opt it into
+// an ASA all from one top-level transaction. Part of the trick is to use an
+// inner helper app. The app being created rekeys itself to the inner app,
+// which funds the outer app and opts it into the ASA. It could have worked
+// differently - the inner app could have just funded the outer app, and then
+// the outer app could have opted-in. But this technique tests something
+// interesting: that the inner app can perform an opt-in on the outer app, which
+// shows that the newly created app's holdings are available. In practice, the
+// helper should rekey it back, but we don't bother here.
+func TestInnerAppCreateAndOptin(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // v31 allows inner appl and inner rekey
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ createasa := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{Total: 2, UnitName: "$"},
+ }
+ asaID := dl.txn(&createasa).ApplyData.ConfigAsset
+ require.NotZero(t, asaID)
+
+ // helper app, called during the creation of another app. When such an
+ // app is created, it rekeys itself to this helper and calls it. The
+ // helper opts the caller into an ASA, and funds the MBR the caller
+ // needs for that opt-in.
+ helper := dl.fundedApp(addrs[0], 1_000_000,
+ main(`
+ itxn_begin
+ int axfer; itxn_field TypeEnum
+ int `+strconv.Itoa(int(asaID))+`; itxn_field XferAsset
+ txn Sender; itxn_field Sender // call as the caller! (works because of rekey by caller)
+ txn Sender; itxn_field AssetReceiver // 0 to self == opt-in
+ itxn_next
+ int pay; itxn_field TypeEnum // pay 200k microAlgos to the caller, for MBR
+ int 200000; itxn_field Amount
+ txn Sender; itxn_field Receiver
+ itxn_submit
+`))
+ // Don't use `main` here; we want to do the work during creation. Rekey
+ // to the helper and invoke it, trusting it to opt us into the ASA.
+ createapp := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ Fee: 3 * 1000, // to pay for self, call to helper, and helper's axfer
+ ApprovalProgram: `
+ itxn_begin
+ int appl; itxn_field TypeEnum
+ addr ` + helper.Address().String() + `; itxn_field RekeyTo
+ int ` + strconv.Itoa(int(helper)) + `; itxn_field ApplicationID
+ txn Assets 0; itxn_field Assets
+ itxn_submit
+ int 1
+`,
+ ForeignApps: []basics.AppIndex{helper},
+ ForeignAssets: []basics.AssetIndex{asaID},
+ }
+ appID := dl.txn(&createapp).ApplyData.ApplicationID
+ require.NotZero(t, appID)
+ })
+}
+
+// TestParentGlobals tests that a newly created app can call an inner app, and
+// the inner app will have access to the parent's globals, even if the originally
+// created app ID isn't passed down, because the rule is that "pending" created
+// apps are available, starting from v38.
+func TestParentGlobals(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+
+ // v38 allows parent access, but we start with v31 to make sure we don't mistakenly change it
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ // helper app, called during the creation of another app. This app tries
+ // to access its parent's globals by using `global CallerApplicationID`.
+ helper := dl.fundedApp(addrs[0], 1_000_000,
+ main(`
+ global CallerApplicationID
+ byte "X"
+ app_global_get_ex; pop; pop; // we only care that it didn't panic
+`))
+
+ // Don't use `main` here; we want to do the work during creation.
+ createProgram := `
+ itxn_begin
+ int appl; itxn_field TypeEnum
+ int ` + strconv.Itoa(int(helper)) + `; itxn_field ApplicationID
+ itxn_submit
+ int 1
+`
+ createapp := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ Fee: 2 * 1000, // to pay for self and call to helper
+ ApprovalProgram: createProgram,
+ ForeignApps: []basics.AppIndex{helper},
+ }
+ var creator basics.AppIndex
+ if ver >= 38 {
+ creator = dl.txn(&createapp).ApplyData.ApplicationID
+ require.NotZero(t, creator)
+ } else {
+ dl.txn(&createapp, "unavailable App")
+ }
+
+ // Now, test the same pattern, but do it all inside yet another outer
+ // app, to show that the parent is available even if it was itself
+ // created as an inner. To do so, we also need to get 0.2 MBR to the
+ // outer app, since it will be creating the "middle" app.
+
+ outerAppAddress := (creator + 3).Address() // creator and its inner call used two IDs; fund takes creator+2, so the outer app gets creator+3
+ outer := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ Fee: 3 * 1000, // to pay for self, call to inner create, and its call to helper
+ ApprovalProgram: `
+ itxn_begin
+ int appl; itxn_field TypeEnum
+ byte 0x` + hex.EncodeToString(createapp.SignedTxn().Txn.ApprovalProgram) + `; itxn_field ApprovalProgram
+ byte 0x` + hex.EncodeToString(createapp.SignedTxn().Txn.ClearStateProgram) + `; itxn_field ClearStateProgram
+ itxn_submit
+ int 1
+`,
+ ForeignApps: []basics.AppIndex{creator, helper},
+ }
+ fund := txntest.Txn{
+ Type: "pay",
+ Amount: 200_000,
+ Sender: addrs[0],
+ Receiver: outerAppAddress,
+ }
+ if ver >= 38 {
+ dl.txgroup("", &fund, &outer)
+ } else {
+ dl.txn(&createapp, "unavailable App")
+ }
+
+ })
}
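
The gate on `ver` above is the pattern used throughout this file: run the same transactions across a consensus range and branch on the version for the expected outcome. A condensed sketch of that pattern, reusing the helpers this file already defines:

    ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
        dl := NewDoubleLedger(t, genBalances, cv, cfg)
        defer dl.Close()
        if ver >= 38 { // parent globals became visible in v38
            dl.txn(&createapp) // expected to succeed
        } else {
            dl.txn(&createapp, "unavailable App") // older versions reject
        }
    })
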
func TestNote(t *testing.T) {
@@ -1029,13 +1024,14 @@ func TestNote(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- app := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ // 31 allowed inner note setting.
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ appID := dl.fundedApp(addrs[0], 1_000_000,
+ main(`
itxn_begin
int pay
itxn_field TypeEnum
@@ -1046,31 +1042,17 @@ func TestNote(t *testing.T) {
byte "abcdefghijklmnopqrstuvwxyz01234567890"
itxn_field Note
itxn_submit
-`),
- }
+`))
- eval := nextBlock(t, l)
- txns(t, l, eval, &app)
- vb := endBlock(t, l, eval)
- appIndex := vb.Block().Payset[0].ApplicationID
- require.Equal(t, basics.AppIndex(1), appIndex)
+ note := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: appID,
+ }
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: appIndex.Address(),
- Amount: 1_000_000,
- }
- note := txntest.Txn{
- Type: "appl",
- Sender: addrs[1],
- ApplicationID: appIndex,
- }
- eval = nextBlock(t, l)
- txns(t, l, eval, &fund, &note)
- vb = endBlock(t, l, eval)
- alphabet := vb.Block().Payset[1].EvalDelta.InnerTxns[0].Txn.Note
- require.Equal(t, "abcdefghijklmnopqrstuvwxyz01234567890", string(alphabet))
+ alphabet := dl.txn(&note).EvalDelta.InnerTxns[0].Txn.Note
+ require.Equal(t, "abcdefghijklmnopqrstuvwxyz01234567890", string(alphabet))
+ })
}
func TestKeyreg(t *testing.T) {
@@ -1113,21 +1095,21 @@ nonpart:
eval := nextBlock(t, l)
txns(t, l, eval, &app)
vb := endBlock(t, l, eval)
- appIndex := vb.Block().Payset[0].ApplicationID
- require.Equal(t, basics.AppIndex(1), appIndex)
+ appID := vb.Block().Payset[0].ApplicationID
+ require.NotZero(t, appID)
// Give the app a lot of money
fund := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: appIndex.Address(),
+ Receiver: appID.Address(),
Amount: 1_000_000_000,
}
eval = nextBlock(t, l)
txn(t, l, eval, &fund)
endBlock(t, l, eval)
- require.Equal(t, 1_000_000_000, int(micros(t, l, appIndex.Address())))
+ require.Equal(t, 1_000_000_000, int(micros(t, l, appID.Address())))
// Build up Residue in RewardsState so it's ready to pay
for i := 1; i < 10; i++ {
@@ -1139,26 +1121,26 @@ nonpart:
pay := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: appIndex,
+ ApplicationID: appID,
ApplicationArgs: [][]byte{[]byte("pay")},
}
eval = nextBlock(t, l)
txn(t, l, eval, &pay)
endBlock(t, l, eval)
// 2000 was earned in rewards (- 1000 fee, -1 pay)
- require.Equal(t, 1_000_000_999, int(micros(t, l, appIndex.Address())))
+ require.Equal(t, 1_000_000_999, int(micros(t, l, appID.Address())))
// Go nonpart
nonpart := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: appIndex,
+ ApplicationID: appID,
ApplicationArgs: [][]byte{[]byte("nonpart")},
}
eval = nextBlock(t, l)
txn(t, l, eval, &nonpart)
endBlock(t, l, eval)
- require.Equal(t, 999_999_999, int(micros(t, l, appIndex.Address())))
+ require.Equal(t, 999_999_999, int(micros(t, l, appID.Address())))
// Build up Residue in RewardsState so it's ready to pay AGAIN
// But expect no rewards
@@ -1171,7 +1153,7 @@ nonpart:
txn(t, l, eval, nonpart.Noted("again"), "cannot change online/offline")
endBlock(t, l, eval)
// Paid fee + 1. Did not get rewards
- require.Equal(t, 999_998_998, int(micros(t, l, appIndex.Address())))
+ require.Equal(t, 999_998_998, int(micros(t, l, appID.Address())))
}
func TestInnerAppCall(t *testing.T) {
@@ -1199,7 +1181,7 @@ func TestInnerAppCall(t *testing.T) {
eval := nextBlock(t, l)
txn(t, l, eval, &app0)
vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
+ id0 := vb.Block().Payset[0].ApplicationID
app1 := txntest.Txn{
Type: "appl",
@@ -1217,22 +1199,22 @@ func TestInnerAppCall(t *testing.T) {
eval = nextBlock(t, l)
txns(t, l, eval, &app1)
vb = endBlock(t, l, eval)
- index1 := vb.Block().Payset[0].ApplicationID
+ id1 := vb.Block().Payset[0].ApplicationID
fund0 := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: index0.Address(),
+ Receiver: id0.Address(),
Amount: 1_000_000_000,
}
fund1 := fund0
- fund1.Receiver = index1.Address()
+ fund1.Receiver = id1.Address()
call1 := txntest.Txn{
Type: "appl",
Sender: addrs[2],
- ApplicationID: index1,
- ForeignApps: []basics.AppIndex{index0},
+ ApplicationID: id1,
+ ForeignApps: []basics.AppIndex{id0},
}
eval = nextBlock(t, l)
txns(t, l, eval, &fund0, &fund1, &call1)
@@ -1247,15 +1229,17 @@ func TestInnerAppManipulate(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- calleeIndex := basics.AppIndex(1)
- callee := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- // This app set a global key arg[1] to arg[2] or get arg[1] and log it
- ApprovalProgram: main(`
+ // 31 allowed inner appl.
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ callee := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ // This app sets global key arg[1] to arg[2], or gets arg[1] and logs it
+ ApprovalProgram: main(`
txn ApplicationArgs 0
byte "set"
==
@@ -1276,26 +1260,23 @@ next1:
next2:
err
`),
- GlobalStateSchema: basics.StateSchema{
- NumByteSlice: 1,
- },
- }
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: calleeIndex.Address(),
- Amount: 1_000_000,
- }
- eval := nextBlock(t, l)
- txns(t, l, eval, &callee, &fund)
- vb := endBlock(t, l, eval)
- require.Equal(t, calleeIndex, vb.Block().Payset[0].ApplicationID)
+ GlobalStateSchema: basics.StateSchema{
+ NumByteSlice: 1,
+ },
+ }
- callerIndex := basics.AppIndex(3)
- caller := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ calleeIndex := dl.txn(&callee).ApplyData.ApplicationID
+ require.NotZero(t, calleeIndex)
+
+ fund := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: calleeIndex.Address(),
+ Amount: 1_000_000,
+ }
+ dl.fullBlock(&fund)
+
+ callerIndex := dl.fundedApp(addrs[0], 1_000_000, main(`
itxn_begin
int appl
itxn_field TypeEnum
@@ -1313,37 +1294,28 @@ next2:
==
assert
b end
-`),
- }
- fund.Receiver = callerIndex.Address()
-
- eval = nextBlock(t, l)
- txns(t, l, eval, &caller, &fund)
- vb = endBlock(t, l, eval)
- require.Equal(t, callerIndex, vb.Block().Payset[0].ApplicationID)
+`))
- call := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: callerIndex,
- ForeignApps: []basics.AppIndex{calleeIndex},
- }
- eval = nextBlock(t, l)
- txns(t, l, eval, &call)
- vb = endBlock(t, l, eval)
- tib := vb.Block().Payset[0]
- // No changes in the top-level EvalDelta
- require.Empty(t, tib.EvalDelta.GlobalDelta)
- require.Empty(t, tib.EvalDelta.LocalDeltas)
-
- inner := tib.EvalDelta.InnerTxns[0]
- require.Empty(t, inner.EvalDelta.LocalDeltas)
-
- require.Len(t, inner.EvalDelta.GlobalDelta, 1)
- require.Equal(t, basics.ValueDelta{
- Action: basics.SetBytesAction,
- Bytes: "A",
- }, inner.EvalDelta.GlobalDelta["X"])
+ call := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: callerIndex,
+ ForeignApps: []basics.AppIndex{calleeIndex},
+ }
+ tib := dl.txn(&call)
+ // No changes in the top-level EvalDelta
+ require.Empty(t, tib.EvalDelta.GlobalDelta)
+ require.Empty(t, tib.EvalDelta.LocalDeltas)
+
+ inner := tib.EvalDelta.InnerTxns[0]
+ require.Empty(t, inner.EvalDelta.LocalDeltas)
+
+ require.Len(t, inner.EvalDelta.GlobalDelta, 1)
+ require.Equal(t, basics.ValueDelta{
+ Action: basics.SetBytesAction,
+ Bytes: "A",
+ }, inner.EvalDelta.GlobalDelta["X"])
+ })
}
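
As the assertions above show, state changes made by an inner call land on that inner transaction's EvalDelta, not on the top level. A sketch of walking those nested deltas; `collectGlobalDeltas` is a hypothetical helper, not part of this patch:

    // collectGlobalDeltas gathers global-state deltas from a transaction and
    // all of its inner transactions, depth-first.
    func collectGlobalDeltas(ad transactions.ApplyData) []basics.StateDelta {
        var out []basics.StateDelta
        if len(ad.EvalDelta.GlobalDelta) > 0 {
            out = append(out, ad.EvalDelta.GlobalDelta)
        }
        for _, inner := range ad.EvalDelta.InnerTxns {
            out = append(out, collectGlobalDeltas(inner.ApplyData)...)
        }
        return out
    }
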
// TestCreateAndUse checks that an ASA can be created in an early tx, and then
@@ -1359,63 +1331,40 @@ func TestCreateAndUse(t *testing.T) {
dl := NewDoubleLedger(t, genBalances, cv, cfg)
defer dl.Close()
- createapp := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ appID := dl.fundedApp(addrs[0], 1_000_000, main(`
itxn_begin
- int axfer; itxn_field TypeEnum
- int 0; itxn_field Amount
- gaid 0; itxn_field XferAsset
- global CurrentApplicationAddress; itxn_field Sender
- global CurrentApplicationAddress; itxn_field AssetReceiver
+ int axfer; itxn_field TypeEnum
+ int 0; itxn_field Amount
+ gaid 0; itxn_field XferAsset
+ global CurrentApplicationAddress; itxn_field Sender
+ global CurrentApplicationAddress; itxn_field AssetReceiver
itxn_submit
-`),
- }
- appIndex := basics.AppIndex(1)
-
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: appIndex.Address(),
- Amount: 1_000_000,
- }
+`))
createasa := txntest.Txn{
Type: "acfg",
Sender: addrs[0],
AssetParams: basics.AssetParams{
- Total: 1000000,
- Decimals: 3,
- UnitName: "oz",
- AssetName: "Gold",
- URL: "https://gold.rush/",
+ Total: 1000000,
},
}
- asaIndex := basics.AssetIndex(3)
+ asaID := basics.AssetIndex(appID + 2) // accounts for intervening fund txn
use := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: basics.AppIndex(1),
+ ApplicationID: appID,
// The point of this test is to show the following (psychic) setting is unnecessary.
- //ForeignAssets: []basics.AssetIndex{asaIndex},
+ //ForeignAssets: []basics.AssetIndex{asaID},
}
- dl.beginBlock()
- dl.txn(&createapp)
- dl.txn(&fund)
if ver == 30 {
- dl.txgroup("invalid Asset reference", &createasa, &use)
- dl.endBlock()
+ dl.txgroup("unavailable Asset", &createasa, &use)
return
}
// v31 onward, create & use works
- dl.txgroup("", &createasa, &use)
- vb := dl.endBlock()
-
- require.Equal(t, appIndex, vb.Block().Payset[0].ApplyData.ApplicationID)
- require.Equal(t, asaIndex, vb.Block().Payset[2].ApplyData.ConfigAsset)
+ payset := dl.txgroup("", &createasa, &use)
+ require.Equal(t, asaID, payset[0].ApplyData.ConfigAsset)
})
}
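
The `appID + 2` above works because creatable IDs come from the ledger-wide transaction counter: every transaction advances it by one, including the plain `pay` that `fundedApp` issues after the create, so the ASA created two transactions later gets `appID + 2`. The bookkeeping, spelled out under that assumed counter behavior:

    // app create  -> appID
    // funding pay -> appID+1 (counter advances, nothing created)
    // asset cfg   -> appID+2 (the new ASA's ID)
    asaID := basics.AssetIndex(appID + 2)
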
@@ -1424,31 +1373,21 @@ func TestGtxnEffects(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // At 30 `gtxn CreatedAssetId is illegal, then from v31 it works.
+ // At 30 `gtxn CreatedAssetID` is illegal, then from v31 it works.
ledgertesting.TestConsensusRange(t, 30, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
dl := NewDoubleLedger(t, genBalances, cv, cfg)
defer dl.Close()
- createapp := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ // the asset ID is referenced inside the very first app's program, so hardcode it
+ asaID := basics.AssetIndex(3)
+ if ver >= 38 {
+ asaID += 1000
+ }
+ appID := dl.fundedApp(addrs[0], 1_000_000, main(`
gtxn 0 CreatedAssetID
- int 3
+ int `+strconv.Itoa(int(asaID))+`
==
- assert`),
- }
- appIndex := basics.AppIndex(1)
-
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: appIndex.Address(),
- Amount: 1_000_000,
- }
-
- dl.beginBlock()
- dl.txns(&createapp, &fund)
+ assert`))
createasa := txntest.Txn{
Type: "acfg",
@@ -1461,24 +1400,18 @@ func TestGtxnEffects(t *testing.T) {
URL: "https://gold.rush/",
},
}
- asaIndex := basics.AssetIndex(3)
-
see := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: basics.AppIndex(1),
+ ApplicationID: appID,
}
if ver == 30 {
dl.txgroup("Unable to obtain effects from top-level transactions", &createasa, &see)
- dl.endBlock()
return
}
- dl.txgroup("", &createasa, &see)
- vb := dl.endBlock()
-
- require.Equal(t, appIndex, vb.Block().Payset[0].ApplyData.ApplicationID)
- require.Equal(t, asaIndex, vb.Block().Payset[2].ApplyData.ConfigAsset)
+ payset := dl.txgroup("", &createasa, &see)
+ require.Equal(t, asaID, payset[0].ApplyData.ConfigAsset)
})
}
@@ -1502,14 +1435,13 @@ func TestBasicReentry(t *testing.T) {
itxn_field ApplicationID
itxn_submit`),
}
- vb := dl.fullBlock(&app0)
- index0 := vb.Block().Payset[0].ApplicationID
+ id0 := dl.txn(&app0).ApplyData.ApplicationID
call1 := txntest.Txn{
Type: "appl",
Sender: addrs[2],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{index0},
+ ApplicationID: id0,
+ ForeignApps: []basics.AppIndex{id0},
}
dl.txn(&call1, "self-call")
})
@@ -1540,12 +1472,12 @@ func TestIndirectReentry(t *testing.T) {
eval := nextBlock(t, l)
txn(t, l, eval, &app0)
vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
+ id0 := vb.Block().Payset[0].ApplicationID
fund := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: index0.Address(),
+ Receiver: id0.Address(),
Amount: 1_000_000,
}
@@ -1564,13 +1496,13 @@ func TestIndirectReentry(t *testing.T) {
eval = nextBlock(t, l)
txns(t, l, eval, &app1, &fund)
vb = endBlock(t, l, eval)
- index1 := vb.Block().Payset[0].ApplicationID
+ id1 := vb.Block().Payset[0].ApplicationID
call1 := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{index1, index0},
+ ApplicationID: id0,
+ ForeignApps: []basics.AppIndex{id1, id0},
}
eval = nextBlock(t, l)
txn(t, l, eval, &call1, "attempt to re-enter")
@@ -1612,12 +1544,12 @@ func TestValidAppReentry(t *testing.T) {
eval := nextBlock(t, l)
txn(t, l, eval, &app0)
vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
+ id0 := vb.Block().Payset[0].ApplicationID
fund0 := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: index0.Address(),
+ Receiver: id0.Address(),
Amount: 1_000_000,
}
@@ -1634,7 +1566,7 @@ func TestValidAppReentry(t *testing.T) {
eval = nextBlock(t, l)
txns(t, l, eval, &app1, &fund0)
vb = endBlock(t, l, eval)
- index1 := vb.Block().Payset[0].ApplicationID
+ id1 := vb.Block().Payset[0].ApplicationID
app2 := txntest.Txn{
Type: "appl",
@@ -1651,12 +1583,12 @@ func TestValidAppReentry(t *testing.T) {
eval = nextBlock(t, l)
txn(t, l, eval, &app2)
vb = endBlock(t, l, eval)
- index2 := vb.Block().Payset[0].ApplicationID
+ id2 := vb.Block().Payset[0].ApplicationID
fund2 := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: index2.Address(),
+ Receiver: id2.Address(),
Amount: 1_000_000,
}
@@ -1667,8 +1599,8 @@ func TestValidAppReentry(t *testing.T) {
call1 := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{index2, index1, index0},
+ ApplicationID: id0,
+ ForeignApps: []basics.AppIndex{id2, id1, id0},
}
eval = nextBlock(t, l)
txn(t, l, eval, &call1)
@@ -1715,13 +1647,12 @@ assert
Sender: addrs[0],
ApprovalProgram: main(program),
}
- vb := dl.fullBlock(&app0)
- index0 := vb.Block().Payset[0].ApplicationID
+ id0 := dl.txn(&app0).ApplyData.ApplicationID
fund0 := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: index0.Address(),
+ Receiver: id0.Address(),
Amount: 1_000_000,
}
@@ -1736,22 +1667,22 @@ assert
`),
}
- vb = dl.fullBlock(&app1, &fund0)
- index1 := vb.Block().Payset[0].ApplicationID
+ payset := dl.txns(&app1, &fund0)
+ id1 := payset[0].ApplicationID
callTxGroup := make([]*txntest.Txn, 16)
callTxGroup[0] = &txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{index1},
+ ApplicationID: id0,
+ ForeignApps: []basics.AppIndex{id1},
ApplicationArgs: [][]byte{{1, 0}}, // 256 inner calls
}
for i := 1; i < 16; i++ {
callTxGroup[i] = &txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: index1,
+ ApplicationID: id1,
Note: []byte{byte(i)},
}
}
@@ -1797,12 +1728,12 @@ assert
eval := nextBlock(t, l)
txn(t, l, eval, &app0)
vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
+ id0 := vb.Block().Payset[0].ApplicationID
fund0 := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: index0.Address(),
+ Receiver: id0.Address(),
Amount: 1_000_000,
}
@@ -1819,13 +1750,13 @@ assert
eval = nextBlock(t, l)
txns(t, l, eval, &app1, &fund0)
vb = endBlock(t, l, eval)
- index1 := vb.Block().Payset[0].ApplicationID
+ id1 := vb.Block().Payset[0].ApplicationID
callTx := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{index1},
+ ApplicationID: id0,
+ ForeignApps: []basics.AppIndex{id1},
}
eval = nextBlock(t, l)
@@ -1833,6 +1764,105 @@ assert
endBlock(t, l, eval)
}
+// TestSelfCheckHoldingNewApp checks whether a newly created app can check its
+// own holdings. There can't really be any value in it from before this group,
+// since it could not have opted in. But it should be legal to look.
+func TestSelfCheckHoldingNewApp(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+
+ // 31 allowed inner appls.
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ asset := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ ConfigAsset: 0,
+ AssetParams: basics.AssetParams{
+ Total: 10,
+ Decimals: 1,
+ UnitName: "X",
+ AssetName: "TEN",
+ },
+ }
+ assetID := dl.txn(&asset).ApplyData.ConfigAsset
+
+ selfcheck := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: `
+ global CurrentApplicationAddress
+ txn Assets 0
+ asset_holding_get AssetBalance
+ !; assert // is not opted in, so exists=0
+ ! // value is also 0
+`,
+ ForeignAssets: []basics.AssetIndex{assetID},
+ }
+ selfcheck.ApplicationID = dl.txn(&selfcheck).ApplicationID
+
+ dl.txn(&selfcheck)
+
+ })
+}
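
`asset_holding_get AssetBalance` pushes two values: the balance and, on top, an "exists" flag that is 0 when the account is not opted in. The two `!` checks above therefore verify both are zero. The same fragment, isolated as a Go constant for illustration:

    // notOptedIn asserts the current app account is not opted into
    // Assets[0] and holds none of it.
    const notOptedIn = `
        global CurrentApplicationAddress
        txn Assets 0
        asset_holding_get AssetBalance
        !; assert // exists flag is 0: not opted in
        !         // balance is also 0
    `
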
+
+// TestCheckHoldingNewApp checks whether a newly created app (account) can have
+// its holding value checked in a later txn. There can't really be any value in
+// it from before this group, since it could not have opted in. But it should be
+// legal to look.
+func TestCheckHoldingNewApp(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+
+ // 31 allowed inner appls.
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ asset := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ ConfigAsset: 0,
+ AssetParams: basics.AssetParams{
+ Total: 10,
+ Decimals: 1,
+ UnitName: "X",
+ AssetName: "TEN",
+ },
+ }
+ assetID := dl.txn(&asset).ApplyData.ConfigAsset
+
+ check := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
+ gaid 0
+ app_params_get AppAddress
+ assert
+ txn Assets 0
+ asset_holding_get AssetBalance
+ !; assert // is not opted in, so exists=0
+ !; assert // value is also 0
+`),
+ ForeignAssets: []basics.AssetIndex{assetID},
+ }
+ check.ApplicationID = dl.txn(&check).ApplyData.ApplicationID
+
+ create := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[1],
+ ApplicationID: 0,
+ }
+ dl.txgroup("", &create, &check)
+ })
+}
+
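The `gaid 0; app_params_get AppAddress` sequence above works because an application's escrow address is a pure function of its ID, so a later transaction in the group can derive it for the app created at group index 0. In Go the same derivation is exposed as `AppIndex.Address()`:

    // An app's account address is derived deterministically from its ID
    // (a domain-separated hash of the ID), so it can be computed before
    // the app exists or holds any state.
    addr := basics.AppIndex(1001).Address()
    _ = addr.String()
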
// TestInnerAppVersionCalling ensures that inner app calls must target >=v6 apps
func TestInnerAppVersionCalling(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1873,10 +1903,10 @@ func TestInnerAppVersionCalling(t *testing.T) {
ClearStateProgram: three.Program,
}
- vb := dl.fullBlock(&create5, &create6, &create5with3)
- v5id := vb.Block().Payset[0].ApplicationID
- v6id := vb.Block().Payset[1].ApplicationID
- v5withv3csp := vb.Block().Payset[2].ApplicationID
+ payset := dl.txns(&create5, &create6, &create5with3)
+ v5id := payset[0].ApplicationID
+ v6id := payset[1].ApplicationID
+ v5withv3csp := payset[2].ApplicationID
call := txntest.Txn{
Type: "appl",
@@ -2046,8 +2076,7 @@ func TestAppDowngrade(t *testing.T) {
ClearStateProgram: four.Program,
}
- vb := dl.fullBlock(&create)
- app := vb.Block().Payset[0].ApplicationID
+ app := dl.txn(&create).ApplicationID
update := txntest.Txn{
Type: "appl",
@@ -2077,8 +2106,7 @@ func TestAppDowngrade(t *testing.T) {
ClearStateProgram: four.Program,
}
- vb := dl.fullBlock(&create)
- app := vb.Block().Payset[0].ApplicationID
+ app := dl.txn(&create).ApplicationID
update := txntest.Txn{
Type: "appl",
@@ -2123,70 +2151,50 @@ func TestAppDowngrade(t *testing.T) {
})
}
-func TestCreatedAppsAreAvailable(t *testing.T) {
+func TestInnerCreatedAppsAreCallable(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
-
- ops, err := logic.AssembleStringWithVersion("int 1\nint 1\nassert", logic.AssemblerMaxVersion)
- require.NoError(t, err)
- program := "byte 0x" + hex.EncodeToString(ops.Program)
- createapp := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
- itxn_begin
- int appl; itxn_field TypeEnum
- ` + program + `; itxn_field ApprovalProgram
- ` + program + `; itxn_field ClearStateProgram
- int 1; itxn_field GlobalNumUint
- int 2; itxn_field LocalNumByteSlice
- int 3; itxn_field LocalNumUint
- itxn_submit`),
- }
-
- eval := nextBlock(t, l)
- txn(t, l, eval, &createapp)
- vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
-
- fund0 := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: index0.Address(),
- Amount: 1_000_000,
- }
+ // 31 allowed inner appl.
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
- eval = nextBlock(t, l)
- txn(t, l, eval, &fund0)
- endBlock(t, l, eval)
+ ops, err := logic.AssembleStringWithVersion("int 1\nint 1\nassert", dl.generator.GenesisProto().LogicSigVersion)
+ require.NoError(t, err)
+ program := "byte 0x" + hex.EncodeToString(ops.Program)
- callTx := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{},
- }
+ appID := dl.fundedApp(addrs[0], 1_000_000,
+ main(`
+ itxn_begin
+ int appl; itxn_field TypeEnum
+ `+program+`; itxn_field ApprovalProgram
+ `+program+`; itxn_field ClearStateProgram
+ int 1; itxn_field GlobalNumUint
+ int 2; itxn_field LocalNumByteSlice
+ int 3; itxn_field LocalNumUint
+ itxn_submit`))
+
+ callCreator := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appID,
+ }
- eval = nextBlock(t, l)
- txn(t, l, eval, &callTx)
- endBlock(t, l, eval)
- index1 := basics.AppIndex(1)
+ tib := dl.txn(&callCreator)
+ createdID := tib.ApplyData.EvalDelta.InnerTxns[0].ApplyData.ApplicationID
+ require.NotZero(t, createdID)
- callTx = txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: index1,
- ForeignApps: []basics.AppIndex{},
- }
+ callCreated := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: createdID,
+ }
- eval = nextBlock(t, l)
- txn(t, l, eval, &callTx)
- endBlock(t, l, eval)
+ dl.txn(&callCreated)
+ })
}
func TestInvalidAppsNotAccessible(t *testing.T) {
@@ -2194,56 +2202,53 @@ func TestInvalidAppsNotAccessible(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- app0 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ // v31 = inner appl
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ // make an app, which we'll try to call without setting it up in the ForeignApps array
+ tib := dl.txn(&txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ })
+ appID := tib.ApplyData.ApplicationID
+
+ // an app that tries to access appID when called
+ app0 := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: main(`
itxn_begin
int appl
itxn_field TypeEnum
- int 2
+ int ` + strconv.Itoa(int(appID)) + `
itxn_field ApplicationID
itxn_submit`),
- }
- eval := nextBlock(t, l)
- txn(t, l, eval, &app0)
- vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
+ }
+ callerID := dl.txn(&app0).ApplicationID
- fund0 := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: index0.Address(),
- Amount: 1_000_000,
- }
+ fundCaller := txntest.Txn{
+ Type: "pay",
+ Sender: addrs[0],
+ Receiver: callerID.Address(),
+ Amount: 1_000_000,
+ }
+ dl.fullBlock(&fundCaller)
- app1 := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
-int 2
-int 2
-==
-assert
-`),
- }
- eval = nextBlock(t, l)
- txns(t, l, eval, &app1, &fund0)
- endBlock(t, l, eval)
+ callTx := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: callerID,
+ }
- callTx := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{},
- }
+ dl.txn(&callTx, "unavailable App "+strconv.Itoa(int(appID)))
- eval = nextBlock(t, l)
- txn(t, l, eval, &callTx, "invalid App reference 2")
- endBlock(t, l, eval)
+ // confirm everything is done right if ForeignApps _is_ set up
+ callTx.ForeignApps = []basics.AppIndex{appID}
+ dl.txn(&callTx)
+ })
}
func TestInvalidAssetsNotAccessible(t *testing.T) {
@@ -2251,56 +2256,48 @@ func TestInvalidAssetsNotAccessible(t *testing.T) {
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- l := newTestLedger(t, genBalances)
- defer l.Close()
- createapp := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: main(`
+ // v31 = inner appl
+ ledgertesting.TestConsensusRange(t, 31, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ createasa := txntest.Txn{
+ Type: "acfg",
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 1000000,
+ UnitName: "oz",
+ AssetName: "Gold",
+ URL: "https://gold.rush/",
+ },
+ }
+ asaID := dl.txn(&createasa).ConfigAsset
+ require.NotZero(t, asaID)
+
+ appID := dl.fundedApp(addrs[0], 1_000_000,
+ main(`
itxn_begin
int axfer; itxn_field TypeEnum
int 0; itxn_field Amount
- int 3; itxn_field XferAsset
+ int `+strconv.Itoa(int(asaID))+`; itxn_field XferAsset
global CurrentApplicationAddress; itxn_field Sender
global CurrentApplicationAddress; itxn_field AssetReceiver
itxn_submit
-`),
- }
- appIndex := basics.AppIndex(1)
-
- fund := txntest.Txn{
- Type: "pay",
- Sender: addrs[0],
- Receiver: appIndex.Address(),
- Amount: 1_000_000,
- }
-
- createasa := txntest.Txn{
- Type: "acfg",
- Sender: addrs[0],
- AssetParams: basics.AssetParams{
- Total: 1000000,
- Decimals: 3,
- UnitName: "oz",
- AssetName: "Gold",
- URL: "https://gold.rush/",
- },
- }
+`))
- eval := nextBlock(t, l)
- txns(t, l, eval, &createapp, &fund, &createasa)
- endBlock(t, l, eval)
+ use := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ ApplicationID: appID,
+ }
- use := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApplicationID: basics.AppIndex(1),
- }
+ dl.txn(&use, "unavailable Asset "+strconv.Itoa(int(asaID)))
+ // confirm everything is done right if ForeignAssets _is_ set up
+ use.ForeignAssets = []basics.AssetIndex{asaID}
+ dl.txn(&use)
- eval = nextBlock(t, l)
- txn(t, l, eval, &use, "invalid Asset reference 3")
- endBlock(t, l, eval)
+ })
}
func executeMegaContract(b *testing.B) {
@@ -2768,19 +2765,19 @@ itxn_submit
eval := nextBlock(t, l)
txn(t, l, eval, &app0)
vb := endBlock(t, l, eval)
- index0 := vb.Block().Payset[0].ApplicationID
+ id0 := vb.Block().Payset[0].ApplicationID
fund0 := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: index0.Address(),
+ Receiver: id0.Address(),
Amount: 1_000_000,
}
optin := txntest.Txn{
Type: "appl",
Sender: addrs[1],
- ApplicationID: index0,
+ ApplicationID: id0,
OnCompletion: transactions.OptInOC,
}
@@ -2800,7 +2797,7 @@ itxn_submit
clear := txntest.Txn{
Type: "appl",
Sender: addrs[1],
- ApplicationID: index0,
+ ApplicationID: id0,
OnCompletion: transactions.ClearStateOC,
}
@@ -2816,7 +2813,7 @@ itxn_submit
// had 2000 bump, now paid 2k, charge 1k, left with 3k total bump
require.Equal(t, uint64(3000), ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw)
// InnerTxn in block
- require.Equal(t, vb.Block().Payset[0].Txn.ApplicationID, index0)
+ require.Equal(t, vb.Block().Payset[0].Txn.ApplicationID, id0)
require.Equal(t, vb.Block().Payset[0].Txn.OnCompletion, transactions.ClearStateOC)
require.Len(t, vb.Block().Payset[0].EvalDelta.InnerTxns, 1)
require.Equal(t, vb.Block().Payset[0].EvalDelta.InnerTxns[0].Txn.Amount.Raw, uint64(2000))
@@ -2824,7 +2821,7 @@ itxn_submit
// Only the fee is paid because pay is "erased", so goes from 2k down to 1k
require.Equal(t, uint64(1000), ad1-genBalances.Balances[addrs[1]].MicroAlgos.Raw)
// no InnerTxn in block
- require.Equal(t, vb.Block().Payset[0].Txn.ApplicationID, index0)
+ require.Equal(t, vb.Block().Payset[0].Txn.ApplicationID, id0)
require.Equal(t, vb.Block().Payset[0].Txn.OnCompletion, transactions.ClearStateOC)
require.Len(t, vb.Block().Payset[0].EvalDelta.InnerTxns, 0)
}
@@ -2918,22 +2915,22 @@ check:
eval := nextBlock(t, l)
txns(t, l, eval, &appA, &appB, &appC)
vb := endBlock(t, l, eval)
- indexA := vb.Block().Payset[0].ApplicationID
- indexB := vb.Block().Payset[1].ApplicationID
- indexC := vb.Block().Payset[2].ApplicationID
+ idA := vb.Block().Payset[0].ApplicationID
+ idB := vb.Block().Payset[1].ApplicationID
+ idC := vb.Block().Payset[2].ApplicationID
fundA := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: indexA.Address(),
+ Receiver: idA.Address(),
Amount: 1_000_000,
}
callA := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: indexA,
- ForeignApps: []basics.AppIndex{indexB, indexC},
+ ApplicationID: idA,
+ ForeignApps: []basics.AppIndex{idB, idC},
}
eval = nextBlock(t, l)
@@ -3032,22 +3029,22 @@ check:
eval := nextBlock(t, l)
txns(t, l, eval, &appA, &appB, &appC)
vb := endBlock(t, l, eval)
- indexA := vb.Block().Payset[0].ApplicationID
- indexB := vb.Block().Payset[1].ApplicationID
- indexC := vb.Block().Payset[2].ApplicationID
+ idA := vb.Block().Payset[0].ApplicationID
+ idB := vb.Block().Payset[1].ApplicationID
+ idC := vb.Block().Payset[2].ApplicationID
fundA := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: indexA.Address(),
+ Receiver: idA.Address(),
Amount: 1_000_000,
}
callA := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: indexA,
- ForeignApps: []basics.AppIndex{indexB, indexC},
+ ApplicationID: idA,
+ ForeignApps: []basics.AppIndex{idB, idC},
}
eval = nextBlock(t, l)
@@ -3083,38 +3080,33 @@ itxn_submit
`),
}
- vb := dl.fullBlock(&appA, &appB)
- index0 := vb.Block().Payset[0].ApplicationID
- index1 := vb.Block().Payset[1].ApplicationID
+ payset := dl.txns(&appA, &appB)
+ id0 := payset[0].ApplicationID
+ id1 := payset[1].ApplicationID
fund1 := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: index1.Address(),
+ Receiver: id1.Address(),
Amount: 1_000_000_000,
}
fund0 := fund1
- fund0.Receiver = index0.Address()
+ fund0.Receiver = id0.Address()
callTx := txntest.Txn{
Type: "appl",
Sender: addrs[2],
- ApplicationID: index1,
- ForeignApps: []basics.AppIndex{index0},
+ ApplicationID: id1,
+ ForeignApps: []basics.AppIndex{id0},
}
- dl.beginBlock()
if ver <= 33 {
dl.txgroup("invalid Account reference", &fund0, &fund1, &callTx)
- dl.endBlock()
return
}
-
- dl.txgroup("", &fund0, &fund1, &callTx)
- vb = dl.endBlock()
-
- require.Equal(t, index0.Address(), vb.Block().Payset[2].EvalDelta.InnerTxns[0].Txn.Receiver)
- require.Equal(t, uint64(100), vb.Block().Payset[2].EvalDelta.InnerTxns[0].Txn.Amount.Raw)
+ payset = dl.txgroup("", &fund0, &fund1, &callTx)
+ require.Equal(t, id0.Address(), payset[2].EvalDelta.InnerTxns[0].Txn.Receiver)
+ require.Equal(t, uint64(100), payset[2].EvalDelta.InnerTxns[0].Txn.Amount.Raw)
})
}
@@ -3132,44 +3124,66 @@ func TestForeignAppAccountsImmutable(t *testing.T) {
appA := txntest.Txn{
Type: "appl",
Sender: addrs[0],
+ ApprovalProgram: main(`
+itxn_begin
+int appl; itxn_field TypeEnum
+txn Applications 1; itxn_field ApplicationID
+int OptIn; itxn_field OnCompletion
+itxn_submit
+`),
}
appB := txntest.Txn{
Type: "appl",
Sender: addrs[0],
ApprovalProgram: main(`
+txn NumApplications // allow "bare" optin
+bz end
txn Applications 1
app_params_get AppAddress
+assert
byte "X"
byte "ABC"
app_local_put
-int 1
`),
+ LocalStateSchema: basics.StateSchema{NumByteSlice: 1},
}
- vb := dl.fullBlock(&appA, &appB)
- index0 := vb.Block().Payset[0].ApplicationID
- index1 := vb.Block().Payset[1].ApplicationID
+ payset := dl.txns(&appA, &appB)
+ id0 := payset[0].ApplicationID
+ id1 := payset[1].ApplicationID
fund1 := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: index1.Address(),
+ Receiver: id1.Address(),
Amount: 1_000_000_000,
}
fund0 := fund1
- fund0.Receiver = index0.Address()
+ fund0.Receiver = id0.Address()
+
+ optin := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[2],
+ ApplicationID: id0,
+ ForeignApps: []basics.AppIndex{id1},
+ }
callTx := txntest.Txn{
Type: "appl",
Sender: addrs[2],
- ApplicationID: index1,
- ForeignApps: []basics.AppIndex{index0},
+ ApplicationID: id1,
+ ForeignApps: []basics.AppIndex{id0},
}
- dl.beginBlock()
- dl.txgroup("invalid Account reference", &fund0, &fund1, &callTx)
- dl.endBlock()
+ var problem string
+ switch {
+ case ver < 34: // before v7, app accounts not available at all
+ problem = "invalid Account reference " + id0.Address().String()
+ case ver < 38: // as of v7, it's the mutation that's the problem
+ problem = "invalid Account reference for mutation"
+ }
+ dl.txgroup(problem, &fund0, &fund1, &optin, &callTx)
})
}
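
The switch above encodes three eras for foreign app accounts: before AVM v7 (consensus v34) they are not addressable at all; from v7 they can be read but not mutated; from v38 the opt-in plus local-state write succeeds. A compact restatement; `expectedProblem` is a hypothetical helper, not part of this patch:

    // expectedProblem mirrors the version gating used in the test above.
    func expectedProblem(ver int, addr basics.Address) string {
        switch {
        case ver < 34: // before AVM v7: foreign app accounts unavailable
            return "invalid Account reference " + addr.String()
        case ver < 38: // v7..v37: readable, but mutation is rejected
            return "invalid Account reference for mutation"
        default: // v38+: allowed
            return ""
        }
    }
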
@@ -3219,38 +3233,37 @@ done:
},
}
- vb := dl.fullBlock(&appA, &appB)
- index0 := vb.Block().Payset[0].ApplicationID
- index1 := vb.Block().Payset[1].ApplicationID
+ payset := dl.txns(&appA, &appB)
+ id0 := payset[0].ApplicationID
+ id1 := payset[1].ApplicationID
fund1 := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: index1.Address(),
+ Receiver: id1.Address(),
Amount: 1_000_000_000,
}
fund0 := fund1
- fund0.Receiver = index0.Address()
- fund1.Receiver = index1.Address()
+ fund0.Receiver = id0.Address()
+ fund1.Receiver = id1.Address()
callA := txntest.Txn{
Type: "appl",
Sender: addrs[2],
- ApplicationID: index0,
- ForeignApps: []basics.AppIndex{index1},
+ ApplicationID: id0,
+ ForeignApps: []basics.AppIndex{id1},
}
callB := txntest.Txn{
Type: "appl",
Sender: addrs[2],
- ApplicationID: index1,
- ForeignApps: []basics.AppIndex{index0},
- Accounts: []basics.Address{index0.Address()},
+ ApplicationID: id1,
+ ForeignApps: []basics.AppIndex{id0},
+ Accounts: []basics.Address{id0.Address()},
}
- vb = dl.fullBlock(&fund0, &fund1, &callA, &callB)
-
- require.Equal(t, "Y", vb.Block().Payset[3].EvalDelta.LocalDeltas[1]["X"].Bytes)
+ payset = dl.txns(&fund0, &fund1, &callA, &callB)
+ require.Equal(t, "Y", payset[3].EvalDelta.LocalDeltas[1]["X"].Bytes)
})
}
@@ -3293,6 +3306,10 @@ func TestEvalAppState(t *testing.T) {
dl := NewDoubleLedger(t, genBalances, cv, cfg)
defer dl.Close()
+ appID := basics.AppIndex(1)
+ if ver >= 38 { // AppForbidLowResources
+ appID += 1000
+ }
appcall1 := txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: addrs[0],
@@ -3316,7 +3333,7 @@ ok:
appcall2 := txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: addrs[0],
- ApplicationID: 1,
+ ApplicationID: appID,
}
dl.beginBlock()
@@ -3327,11 +3344,11 @@ ok:
vb := dl.endBlock()
deltas := vb.Delta()
- params, ok := deltas.Accts.GetAppParams(addrs[0], 1)
- require.True(t, ok)
- state := params.Params.GlobalState
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["caller"])
- require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addrs[0][:])}, state["creator"])
+ params, _ := deltas.Accts.GetAppParams(addrs[0], appID)
+ require.Equal(t, basics.TealKeyValue{
+ "caller": {Type: basics.TealBytesType, Bytes: string(addrs[0][:])},
+ "creator": {Type: basics.TealBytesType, Bytes: string(addrs[0][:])},
+ }, params.Params.GlobalState)
})
}
@@ -3384,9 +3401,9 @@ func TestRewardsInAD(t *testing.T) {
dl.fullBlock()
}
- vb := dl.fullBlock(&payTxn, &payNonPart)
- payInBlock := vb.Block().Payset[0]
- nonPartInBlock := vb.Block().Payset[1]
+ payset := dl.txns(&payTxn, &payNonPart)
+ payInBlock := payset[0]
+ nonPartInBlock := payset[1]
if ver >= 15 {
require.Greater(t, payInBlock.ApplyData.SenderRewards.Raw, uint64(1000))
require.Greater(t, payInBlock.ApplyData.ReceiverRewards.Raw, uint64(1000))
@@ -3407,31 +3424,31 @@ func TestRewardsInAD(t *testing.T) {
})
}
-// TestDeleteNonExistantKeys checks if the EvalDeltas from deleting missing keys are correct
-func TestDeleteNonExistantKeys(t *testing.T) {
+// TestDeleteNonExistentKeys checks if the EvalDeltas from deleting missing keys are correct
+func TestDeleteNonExistentKeys(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
genBalances, addrs, _ := ledgertesting.NewTestGenesis()
- // AVM v2 (apps)
- ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ // Start at AVM v4, so we can use `txn Sender`
+ ledgertesting.TestConsensusRange(t, 28, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
dl := NewDoubleLedger(t, genBalances, cv, cfg)
defer dl.Close()
- const appID basics.AppIndex = 1
-
createTxn := txntest.Txn{
Type: "appl",
Sender: addrs[0],
ApprovalProgram: main(`
byte "missing_global"
app_global_del
-int 0
+txn Sender
byte "missing_local"
app_local_del
`),
}
+ appID := dl.txn(&createTxn).ApplyData.ApplicationID
+
optInTxn := txntest.Txn{
Type: "appl",
Sender: addrs[1],
@@ -3439,14 +3456,14 @@ app_local_del
OnCompletion: transactions.OptInOC,
}
- vb := dl.fullBlock(&createTxn, &optInTxn)
- require.Len(t, vb.Block().Payset[1].EvalDelta.GlobalDelta, 0)
+ tib := dl.txn(&optInTxn)
+ require.Len(t, tib.EvalDelta.GlobalDelta, 0)
// For a while, we encoded an empty localdelta
deltas := 1
if ver >= 27 {
deltas = 0
}
- require.Len(t, vb.Block().Payset[1].EvalDelta.LocalDeltas, deltas)
+ require.Len(t, tib.EvalDelta.LocalDeltas, deltas)
})
}
@@ -3552,8 +3569,7 @@ func TestLogsInBlock(t *testing.T) {
// Fail the clear state
ClearStateProgram: "byte \"CLR\"\n log\n int 0",
}
- vb := dl.fullBlock(&createTxn)
- createInBlock := vb.Block().Payset[0]
+ createInBlock := dl.txn(&createTxn)
appID := createInBlock.ApplyData.ApplicationID
require.Equal(t, "APP", createInBlock.ApplyData.EvalDelta.Logs[0])
@@ -3563,8 +3579,7 @@ func TestLogsInBlock(t *testing.T) {
ApplicationID: appID,
OnCompletion: transactions.OptInOC,
}
- vb = dl.fullBlock(&optInTxn)
- optInInBlock := vb.Block().Payset[0]
+ optInInBlock := dl.txn(&optInTxn)
require.Equal(t, "APP", optInInBlock.ApplyData.EvalDelta.Logs[0])
clearTxn := txntest.Txn{
@@ -3573,8 +3588,7 @@ func TestLogsInBlock(t *testing.T) {
ApplicationID: appID,
OnCompletion: transactions.ClearStateOC,
}
- vb = dl.fullBlock(&clearTxn)
- clearInBlock := vb.Block().Payset[0]
+ clearInBlock := dl.txn(&clearTxn)
// Logs do not appear if the ClearState failed
require.Len(t, clearInBlock.ApplyData.EvalDelta.Logs, 0)
})
@@ -3605,8 +3619,6 @@ func TestUnfundedSenders(t *testing.T) {
dl := NewDoubleLedger(t, genBalances, cv, cfg)
defer dl.Close()
- asaIndex := basics.AssetIndex(1)
-
ghost := basics.Address{0x01}
asaCreate := txntest.Txn{
@@ -3625,7 +3637,17 @@ func TestUnfundedSenders(t *testing.T) {
Sender: addrs[0],
}
- dl.fullBlock(&asaCreate, &appCreate)
+ payset := dl.txns(&asaCreate, &appCreate)
+ asaID := payset[0].ApplyData.ConfigAsset
+ // we are testing some versions before ApplyData.ConfigAsset was
+ // populated. At that time, the initial ID was 1, so we can hardcode.
+ if asaID == 0 {
+ asaID = 1
+ }
+ appID := payset[1].ApplyData.ApplicationID
+ if appID == 0 {
+ appID = 2
+ }
// Advance so that rewardsLevel increases
for i := 1; i < 10; i++ {
@@ -3661,14 +3683,14 @@ func TestUnfundedSenders(t *testing.T) {
Sender: ghost,
AssetReceiver: addrs[0],
AssetSender: addrs[1],
- XferAsset: asaIndex,
+ XferAsset: asaID,
Fee: 0,
},
{ // Freeze
Type: "afrz",
Sender: ghost,
FreezeAccount: addrs[0], // creator, therefore is opted in
- FreezeAsset: asaIndex,
+ FreezeAsset: asaID,
AssetFrozen: true,
Fee: 0,
},
@@ -3676,14 +3698,14 @@ func TestUnfundedSenders(t *testing.T) {
Type: "afrz",
Sender: ghost,
FreezeAccount: addrs[0], // creator, therefore is opted in
- FreezeAsset: asaIndex,
+ FreezeAsset: asaID,
AssetFrozen: false,
Fee: 0,
},
{ // App call
Type: "appl",
Sender: ghost,
- ApplicationID: basics.AppIndex(2),
+ ApplicationID: appID,
Fee: 0,
},
{ // App creation (only works because it's also deleted)
@@ -3694,7 +3716,7 @@ func TestUnfundedSenders(t *testing.T) {
},
}
- // v34 is the likely version for UnfundedSenders. Change if that doesn't happen.
+ // v34 enabled UnfundedSenders
var problem string
if ver < 34 {
// In the old days, balances.Move would try to increase the rewardsState on the unfunded account
@@ -3723,17 +3745,15 @@ func TestAppCallAppDuringInit(t *testing.T) {
Sender: addrs[0],
}
- // construct a simple app
- vb := dl.fullBlock(&approve)
-
- // now make a new app that calls it during init
- approveID := vb.Block().Payset[0].ApplicationID
+ // construct a simple approval app
+ approveID := dl.txn(&approve).ApplicationID
// Advance so that rewardsLevel increases
for i := 1; i < 10; i++ {
dl.fullBlock()
}
+ // now make a new app that calls it during init
callInInit := txntest.Txn{
Type: "appl",
Sender: addrs[0],
diff --git a/ledger/archival_test.go b/ledger/archival_test.go
index f30821a35..de483e227 100644
--- a/ledger/archival_test.go
+++ b/ledger/archival_test.go
@@ -37,7 +37,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
- "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store/blockdb"
"github.com/algorand/go-algorand/ledger/store/trackerdb"
@@ -68,7 +68,7 @@ func (wl *wrappedLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, er
return wl.l.BlockHdr(rnd)
}
-func (wl *wrappedLedger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger internal.LedgerForEvaluator) (ledgercore.StateDelta, error) {
+func (wl *wrappedLedger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger eval.LedgerForEvaluator) (ledgercore.StateDelta, error) {
return wl.l.trackerEvalVerified(blk, accUpdatesLedger)
}
diff --git a/ledger/boxtxn_test.go b/ledger/boxtxn_test.go
index 47291575b..3a66583d8 100644
--- a/ledger/boxtxn_test.go
+++ b/ledger/boxtxn_test.go
@@ -19,6 +19,7 @@ package ledger
import (
"bytes"
"encoding/binary"
+ "fmt"
"strings"
"testing"
"time"
@@ -146,21 +147,19 @@ func TestBoxCreate(t *testing.T) {
proto := config.Consensus[cv]
mbr := boxFee(proto, 28)
- appIndex := dl.fundedApp(addrs[0], proto.MinBalance+3*mbr, boxAppSource)
+ appID := dl.fundedApp(addrs[0], proto.MinBalance+3*mbr, boxAppSource)
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: appIndex,
+ ApplicationID: appID,
}
adam := call.Args("create", "adam")
- dl.txn(adam, "invalid Box reference adam")
+ dl.txn(adam, fmt.Sprintf("invalid Box reference %#x", "adam"))
adam.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("adam")}}
- dl.beginBlock()
- dl.txn(adam)
- vb := dl.endBlock()
+ vb := dl.fullBlock(adam)
// confirm the deltas has the creation
require.Len(t, vb.Delta().KvMods, 1)
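
The new expected-error strings use `%#x`, which renders a string's bytes as 0x-prefixed hex, so box names in errors now read like `0x6164616d` rather than `adam`:

    // %#x on a string prints its bytes as 0x-prefixed hex.
    fmt.Printf("invalid Box reference %#x\n", "adam")
    // prints: invalid Box reference 0x6164616d
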
@@ -170,28 +169,28 @@ func TestBoxCreate(t *testing.T) {
}
dl.txn(adam.Args("check", "adam", "\x00\x00"))
- dl.txgroup("box_create\nassert", adam.Noted("one"), adam.Noted("two"))
+ dl.txgroup("box_create; assert", adam.Noted("one"), adam.Noted("two"))
bobo := call.Args("create", "bobo")
- dl.txn(bobo, "invalid Box reference bobo")
+ dl.txn(bobo, fmt.Sprintf("invalid Box reference %#x", "bobo"))
bobo.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("bobo")}}
dl.txn(bobo)
- dl.txgroup("box_create\nassert", bobo.Noted("one"), bobo.Noted("two"))
+ dl.txgroup("box_create; assert", bobo.Noted("one"), bobo.Noted("two"))
dl.beginBlock()
chaz := call.Args("create", "chaz")
chaz.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("chaz")}}
dl.txn(chaz)
- dl.txn(chaz.Noted("again"), "box_create\nassert")
+ dl.txn(chaz.Noted("again"), "box_create; assert")
dl.endBlock()
// new block
- dl.txn(chaz.Noted("again"), "box_create\nassert")
+ dl.txn(chaz.Noted("again"), "box_create; assert")
dogg := call.Args("create", "dogg")
dogg.Boxes = []transactions.BoxRef{{Index: 0, Name: []byte("dogg")}}
dl.txn(dogg, "below min")
dl.txn(chaz.Args("delete", "chaz"))
- dl.txn(chaz.Args("delete", "chaz").Noted("again"), "box_del\nassert")
+ dl.txn(chaz.Args("delete", "chaz").Noted("again"), "box_del; assert")
dl.txn(dogg)
dl.txn(bobo.Args("delete", "bobo"))
@@ -218,19 +217,19 @@ func TestBoxRecreate(t *testing.T) {
proto := config.Consensus[cv]
mbr := boxFee(proto, 8)
- appIndex := dl.fundedApp(addrs[0], proto.MinBalance+mbr, boxAppSource)
+ appID := dl.fundedApp(addrs[0], proto.MinBalance+mbr, boxAppSource)
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: appIndex,
+ ApplicationID: appID,
Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("adam")}},
}
create := call.Args("create", "adam", "\x04") // box value size is 4 bytes
recreate := call.Args("recreate", "adam", "\x04")
- dl.txn(recreate, "box_create\n!\nassert")
+ dl.txn(recreate, "box_create; !; assert")
dl.txn(create)
dl.txn(recreate)
dl.txn(call.Args("set", "adam", "\x01\x02\x03\x04"))
@@ -277,15 +276,19 @@ func TestBoxCreateAvailability(t *testing.T) {
`,
}
- // We know box_create worked because we finished and checked MBR
+ // We know box_create worked because this failure (checking the MBR)
+ // happens at the end of the group evaluation.
dl.txn(&accessInCreate, "balance 0 below min")
// But let's fund it and be sure. This is "psychic". We're going to fund
// the app address that we know the app will get. So this is a nice
// test, but an unrealistic way to actually create a box.
psychic := basics.AppIndex(2)
-
proto := config.Consensus[cv]
+ if proto.AppForbidLowResources {
+ psychic += 1000
+ }
+
dl.txn(&txntest.Txn{
Type: "pay",
Sender: addrs[0],
@@ -351,12 +354,10 @@ func TestBoxCreateAvailability(t *testing.T) {
ApplicationID: trampoline,
}
- dl.beginBlock()
- dl.txgroup("", &accessWhenCalled, &call)
- vb := dl.endBlock()
+ payset := dl.txgroup("", &accessWhenCalled, &call)
// Make sure that we actually did it.
- require.Equal(t, "we did it", vb.Block().Payset[1].ApplyData.EvalDelta.InnerTxns[1].EvalDelta.Logs[0])
+ require.Equal(t, "we did it", payset[1].ApplyData.EvalDelta.InnerTxns[1].EvalDelta.Logs[0])
})
}
@@ -375,18 +376,16 @@ func TestBoxRW(t *testing.T) {
log := logging.NewLogger()
log.SetOutput(&bufNewLogger)
- appIndex := dl.fundedApp(addrs[0], 1_000_000, boxAppSource)
+ appID := dl.fundedApp(addrs[0], 1_000_000, boxAppSource)
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: appIndex,
+ ApplicationID: appID,
Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("x")}},
}
- dl.txn(call.Args("create", "x", "\x10")) // 16
- dl.beginBlock()
- dl.txn(call.Args("set", "x", "ABCDEFGHIJ")) // 10 long
- vb := dl.endBlock()
+ dl.txn(call.Args("create", "x", "\x10")) // 16
+ vb := dl.fullBlock(call.Args("set", "x", "ABCDEFGHIJ")) // 10 long
// confirm the deltas has the change, including the old value
require.Len(t, vb.Delta().KvMods, 1)
for _, kvDelta := range vb.Delta().KvMods { // There's only one
@@ -420,7 +419,7 @@ func TestBoxRW(t *testing.T) {
time.Sleep(100 * time.Millisecond) // give commit time to run, and prune au caches
dl.fullBlock(call.Args("check", "x", "ABCDEFGH"))
- dl.txn(call.Args("create", "yy"), "invalid Box reference yy")
+ dl.txn(call.Args("create", "yy"), fmt.Sprintf("invalid Box reference %#x", "yy"))
withBr := call.Args("create", "yy")
withBr.Boxes = append(withBr.Boxes, transactions.BoxRef{Index: 1, Name: []byte("yy")})
require.Error(dl.t, withBr.Txn().WellFormed(transactions.SpecialAddresses{}, dl.generator.GenesisProto()))
@@ -451,11 +450,11 @@ func TestBoxAccountData(t *testing.T) {
log := logging.NewLogger()
log.SetOutput(&bufNewLogger)
- appIndex := dl.fundedApp(addrs[0], 1_000_000, boxAppSource)
+ appID := dl.fundedApp(addrs[0], 1_000_000, boxAppSource)
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: appIndex,
+ ApplicationID: appID,
Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("x")}, {Index: 0, Name: []byte("y")}},
}
@@ -484,12 +483,12 @@ assert
==
assert
`)
- verifyAppIndex := dl.fundedApp(addrs[0], 0, verifyAppSrc)
+ verifyAppID := dl.fundedApp(addrs[0], 0, verifyAppSrc)
verifyAppCall := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: verifyAppIndex,
- Accounts: []basics.Address{appIndex.Address()},
+ ApplicationID: verifyAppID,
+ Accounts: []basics.Address{appID.Address()},
}
// The app account has no box data initially
@@ -533,11 +532,11 @@ func TestBoxIOBudgets(t *testing.T) {
dl := NewDoubleLedger(t, genBalances, cv, cfg)
defer dl.Close()
- appIndex := dl.fundedApp(addrs[0], 0, boxAppSource)
+ appID := dl.fundedApp(addrs[0], 0, boxAppSource)
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: appIndex,
+ ApplicationID: appID,
Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("x")}},
}
dl.txn(call.Args("create", "x", "\x10\x00"), // 4096
@@ -559,7 +558,7 @@ func TestBoxIOBudgets(t *testing.T) {
fundApp := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: appIndex.Address(),
+ Receiver: appID.Address(),
Amount: proto.MinBalance + boxFee(proto, 4096+1), // remember key len!
}
create := call.Args("create", "x", "\x10\x00")
@@ -604,21 +603,21 @@ func TestBoxInners(t *testing.T) {
dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]})
dl.txn(&txntest.Txn{Type: "pay", Sender: addrs[0], Receiver: addrs[0]})
- boxIndex := dl.fundedApp(addrs[0], 2_000_000, boxAppSource) // there are some big boxes made
- passIndex := dl.fundedApp(addrs[0], 120_000, passThruSource) // lowish, show it's not paying for boxes
+ boxID := dl.fundedApp(addrs[0], 2_000_000, boxAppSource) // there are some big boxes made
+ passID := dl.fundedApp(addrs[0], 120_000, passThruSource) // lowish, show it's not paying for boxes
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: passIndex,
- ForeignApps: []basics.AppIndex{boxIndex},
+ ApplicationID: passID,
+ ForeignApps: []basics.AppIndex{boxID},
Boxes: []transactions.BoxRef{{Index: 0, Name: []byte("x")}},
}
// The current Boxes gives top-level access to "x", not the inner app
dl.txn(call.Args("create", "x", "\x10"), // 8
- "invalid Box reference x")
+ fmt.Sprintf("invalid Box reference %#x", 'x'))
// This isn't right: Index should be index into ForeignApps
- call.Boxes = []transactions.BoxRef{{Index: uint64(boxIndex), Name: []byte("x")}}
+ call.Boxes = []transactions.BoxRef{{Index: uint64(boxID), Name: []byte("x")}}
require.Error(t, call.Txn().WellFormed(transactions.SpecialAddresses{}, dl.generator.genesisProto))
call.Boxes = []transactions.BoxRef{{Index: 1, Name: []byte("x")}}
@@ -657,7 +656,7 @@ func TestBoxInners(t *testing.T) {
dl.txgroup("", checkX, checkY)
require.Len(t, setY.Boxes, 2) // recall that setY has ("y", "nope") right now. no "x"
- dl.txgroup("invalid Box reference x", checkX, setY)
+ dl.txgroup(fmt.Sprintf("invalid Box reference %#x", 'x'), checkX, setY)
setY.Boxes = append(setY.Boxes, transactions.BoxRef{Index: 1, Name: []byte("x")})
dl.txgroup("", checkX, setY)
@@ -671,15 +670,15 @@ func TestBoxInners(t *testing.T) {
// Try some get/put action
dl.txn(call.Args("put", "x", "john doe"))
- vb := dl.fullBlock(call.Args("get", "x"))
+ tib := dl.txn(call.Args("get", "x"))
// we are passing this thru to the underlying box app which logs the get
- require.Equal(t, "john doe", vb.Block().Payset[0].ApplyData.EvalDelta.InnerTxns[0].EvalDelta.Logs[0])
+ require.Equal(t, "john doe", tib.ApplyData.EvalDelta.InnerTxns[0].EvalDelta.Logs[0])
dl.txn(call.Args("check", "x", "john"))
// bad change because of length
dl.txn(call.Args("put", "x", "steve doe"), "box_put wrong size")
- vb = dl.fullBlock(call.Args("get", "x"))
- require.Equal(t, "john doe", vb.Block().Payset[0].ApplyData.EvalDelta.InnerTxns[0].EvalDelta.Logs[0])
+ tib = dl.txn(call.Args("get", "x"))
+ require.Equal(t, "john doe", tib.ApplyData.EvalDelta.InnerTxns[0].EvalDelta.Logs[0])
// good change
dl.txn(call.Args("put", "x", "mark doe"))
diff --git a/ledger/bulletin.go b/ledger/bulletin.go
index cea20f1e7..b05848193 100644
--- a/ledger/bulletin.go
+++ b/ledger/bulletin.go
@@ -81,10 +81,7 @@ func (b *bulletin) Wait(round basics.Round) chan struct{} {
}
func (b *bulletin) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
- // We want to keep existing notification requests in memory if this flow is triggered by reloadLedger.
- if b.pendingNotificationRequests == nil {
- b.pendingNotificationRequests = make(map[basics.Round]notifier)
- }
+ b.pendingNotificationRequests = make(map[basics.Round]notifier)
b.latestRound = l.Latest()
return nil
}
@@ -126,7 +123,7 @@ func (b *bulletin) postCommit(ctx context.Context, dcc *deferredCommitContext) {
func (b *bulletin) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
}
-func (b *bulletin) handleUnorderedCommit(*deferredCommitContext) {
+func (b *bulletin) handleUnorderedCommitOrError(*deferredCommitContext) {
}
func (b *bulletin) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go
index ef3834c12..355559ef8 100644
--- a/ledger/catchpointtracker.go
+++ b/ledger/catchpointtracker.go
@@ -22,6 +22,7 @@ import (
"compress/gzip"
"context"
"database/sql"
+ "encoding/base32"
"encoding/hex"
"errors"
"fmt"
@@ -56,10 +57,22 @@ const (
// CatchpointFileVersionV5 is the catchpoint file version that was used when the database schema was V0-V5.
CatchpointFileVersionV5 = uint64(0200)
- // CatchpointFileVersionV6 is the catchpoint file version that is matching database schema V6.
+ // CatchpointFileVersionV6 is the catchpoint file version that matches database schema versions V6 and later.
// This version introduced accounts and resources separation. The first catchpoint
// round of this version is >= `reenableCatchpointsRound`.
CatchpointFileVersionV6 = uint64(0201)
+ // CatchpointFileVersionV7 is the catchpoint file version that matches database schema V10.
+ // This version introduced state proof verification data and versioning for CatchpointLabel.
+ CatchpointFileVersionV7 = uint64(0202)
+
+ // CatchpointContentFileName is the name of the file, inside the tar archive, that holds the catchpoint header info
+ CatchpointContentFileName = "content.msgpack"
+ // catchpointSPVerificationFileName is the name of the file with state proof verification data
+ catchpointSPVerificationFileName = "stateProofVerificationContext.msgpack"
+ // catchpointBalancesFileNameTemplate is the name template for files with balances data
+ catchpointBalancesFileNameTemplate = "balances.%d.msgpack"
+ catchpointBalancesFileNamePrefix = "balances."
+ catchpointBalancesFileNameSuffix = ".msgpack"
)
func catchpointStage1Encoder(w io.Writer) (io.WriteCloser, error) {
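The template and its prefix/suffix constants are two views of the same naming scheme: the writer formats chunk names with the template, and consumers can recover the chunk number by stripping the prefix and suffix. A minimal standalone sketch of that round trip (the parsing shown here is illustrative, not a ledger-package helper):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

const (
	catchpointBalancesFileNameTemplate = "balances.%d.msgpack"
	catchpointBalancesFileNamePrefix   = "balances."
	catchpointBalancesFileNameSuffix   = ".msgpack"
)

func main() {
	name := fmt.Sprintf(catchpointBalancesFileNameTemplate, 7) // "balances.7.msgpack"
	trimmed := strings.TrimSuffix(strings.TrimPrefix(name, catchpointBalancesFileNamePrefix), catchpointBalancesFileNameSuffix)
	chunkNum, err := strconv.ParseUint(trimmed, 10, 64)
	fmt.Println(chunkNum, err) // 7 <nil>
}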
@@ -123,11 +136,11 @@ type catchpointTracker struct {
// roundDigest stores the digest of the block for every round starting with dbRound+1 and every round after it.
roundDigest []crypto.Digest
- // reenableCatchpointsRound is a round where the EnableOnlineAccountCatchpoints feature was enabled via the consensus.
+ // reenableCatchpointsRound is a round where the EnableCatchpointsWithSPContexts feature was enabled via the consensus.
// we avoid generating catchpoints before that round in order to ensure the network remains consistent in the catchpoint
// label being produced. This variable could be "wrong" in two cases -
- // 1. It's zero, meaning that the EnableOnlineAccountCatchpoints has yet to be seen.
- // 2. It's non-zero meaning that it the given round is after the EnableOnlineAccountCatchpoints was enabled ( it might be exact round
+ // 1. It's zero, meaning that the EnableCatchpointsWithSPContexts has yet to be seen.
+ // 2. It's non-zero, meaning that the given round is after EnableCatchpointsWithSPContexts was enabled ( it might be the exact round
// but that's only if newBlock was called with that round ), plus the lookback.
reenableCatchpointsRound basics.Round
@@ -195,6 +208,8 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
var totalAccounts uint64
var totalChunks uint64
var biggestChunkLen uint64
+ var spVerificationHash crypto.Digest
+ var catchpointGenerationStats telemetryspec.CatchpointGenerationEventDetails
if ct.enableGeneratingCatchpointFiles {
// Generate the catchpoint file. This is done inline so that it will
@@ -202,8 +217,10 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
// expects that the accounts data would not be modified in the
// background during its execution.
var err error
- totalKVs, totalAccounts, totalChunks, biggestChunkLen, err = ct.generateCatchpointData(
- ctx, dbRound, updatingBalancesDuration)
+
+ catchpointGenerationStats.BalancesWriteTime = uint64(updatingBalancesDuration.Nanoseconds())
+ totalKVs, totalAccounts, totalChunks, biggestChunkLen, spVerificationHash, err = ct.generateCatchpointData(
+ ctx, dbRound, &catchpointGenerationStats)
atomic.StoreInt32(&ct.catchpointDataWriting, 0)
if err != nil {
return err
@@ -216,7 +233,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic
return err
}
- err = ct.recordFirstStageInfo(ctx, tx, dbRound, totalKVs, totalAccounts, totalChunks, biggestChunkLen)
+ err = ct.recordFirstStageInfo(ctx, tx, &catchpointGenerationStats, dbRound, totalKVs, totalAccounts, totalChunks, biggestChunkLen, spVerificationHash)
if err != nil {
return err
}
@@ -356,7 +373,7 @@ func (ct *catchpointTracker) newBlock(blk bookkeeping.Block, delta ledgercore.St
ct.roundDigest = append(ct.roundDigest, blk.Digest())
- if (config.Consensus[blk.CurrentProtocol].EnableOnlineAccountCatchpoints || ct.forceCatchpointFileWriting) && ct.reenableCatchpointsRound == 0 {
+ if (config.Consensus[blk.CurrentProtocol].EnableCatchpointsWithSPContexts || ct.forceCatchpointFileWriting) && ct.reenableCatchpointsRound == 0 {
catchpointLookback := config.Consensus[blk.CurrentProtocol].CatchpointLookback
if catchpointLookback == 0 {
catchpointLookback = config.Consensus[blk.CurrentProtocol].MaxBalLookback
@@ -432,7 +449,7 @@ func (ct *catchpointTracker) produceCommittingTask(committedRound basics.Round,
ct.catchpointInterval, dcr.catchpointLookback)
// if we're still writing the previous balances, we can't move forward yet.
- if ct.IsWritingCatchpointDataFile() {
+ if ct.isWritingCatchpointDataFile() {
// if we hit this path, it means that we're still writing a catchpoint.
// see if the new delta range contains another catchpoint.
if hasIntermediateFirstStageRound {
@@ -452,8 +469,6 @@ func (ct *catchpointTracker) produceCommittingTask(committedRound basics.Round,
dcr.catchpointFirstStage = true
if ct.enableGeneratingCatchpointFiles {
- // store non-zero ( all ones ) into the catchpointWriting atomic variable to indicate that a catchpoint is being written ( or, queued to be written )
- atomic.StoreInt32(&ct.catchpointDataWriting, int32(-1))
ct.catchpointDataSlowWriting = make(chan struct{}, 1)
if hasMultipleIntermediateFirstStageRounds {
close(ct.catchpointDataSlowWriting)
@@ -461,7 +476,6 @@ func (ct *catchpointTracker) produceCommittingTask(committedRound basics.Round,
}
}
- dcr.catchpointDataWriting = &ct.catchpointDataWriting
dcr.enableGeneratingCatchpointFiles = ct.enableGeneratingCatchpointFiles
rounds := ct.calculateCatchpointRounds(dcr)
@@ -476,6 +490,11 @@ func (ct *catchpointTracker) prepareCommit(dcc *deferredCommitContext) error {
ct.catchpointsMu.RLock()
defer ct.catchpointsMu.RUnlock()
+ if ct.enableGeneratingCatchpointFiles && dcc.catchpointFirstStage {
+ // store non-zero ( all ones ) into the catchpointDataWriting atomic variable to indicate that a catchpoint is being written
+ atomic.StoreInt32(&ct.catchpointDataWriting, int32(-1))
+ }
+
dcc.committedRoundDigests = make([]crypto.Digest, dcc.offset)
copy(dcc.committedRoundDigests, ct.roundDigest[:dcc.offset])
@@ -587,7 +606,7 @@ func doRepackCatchpoint(ctx context.Context, header CatchpointFileHeader, bigges
bytes := protocol.Encode(&header)
err := out.WriteHeader(&tar.Header{
- Name: "content.msgpack",
+ Name: CatchpointContentFileName,
Mode: 0600,
Size: int64(len(bytes)),
})
@@ -711,8 +730,8 @@ func repackCatchpoint(ctx context.Context, header CatchpointFileHeader, biggestC
// the unfinished catchpoint record.
func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound basics.Round, round basics.Round, dataInfo trackerdb.CatchpointFirstStageInfo, blockHash crypto.Digest) error {
startTime := time.Now()
- label := ledgercore.MakeCatchpointLabel(
- round, blockHash, dataInfo.TrieBalancesHash, dataInfo.Totals).String()
+ labelMaker := ledgercore.MakeCatchpointLabelMakerCurrent(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals, &dataInfo.StateProofVerificationHash)
+ label := ledgercore.MakeLabel(labelMaker)
ct.log.Infof(
"creating catchpoint round: %d accountsRound: %d label: %s",
@@ -747,7 +766,7 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound
// Make a catchpoint file.
header := CatchpointFileHeader{
- Version: CatchpointFileVersionV6,
+ Version: CatchpointFileVersionV7,
BalancesRound: accountsRound,
BlocksRound: round,
Totals: dataInfo.Totals,
@@ -909,10 +928,10 @@ func (ct *catchpointTracker) postCommitUnlocked(ctx context.Context, dcc *deferr
}
}
-// handleUnorderedCommit is a special method for handling deferred commits that are out of order.
+// handleUnorderedCommitOrError is a special method for handling deferred commits that are out of order.
// A tracker might update its own state in this case. For example, the account catchpoint tracker cancels
// the catchpoint writing scheduled for that deferred commit.
-func (ct *catchpointTracker) handleUnorderedCommit(dcc *deferredCommitContext) {
+func (ct *catchpointTracker) handleUnorderedCommitOrError(dcc *deferredCommitContext) {
// if the node is configured to generate catchpoint files, we might need to update the catchpointWriting variable.
if ct.enableGeneratingCatchpointFiles {
// determine if this was a catchpoint round
@@ -1068,20 +1087,24 @@ func (ct *catchpointTracker) accountsUpdateBalances(accountsDeltas compactAccoun
return
}
-// IsWritingCatchpointDataFile returns true iff a (first stage) catchpoint data file
+// isWritingCatchpointDataFile returns true iff a (first stage) catchpoint data file
// is being generated.
-func (ct *catchpointTracker) IsWritingCatchpointDataFile() bool {
+func (ct *catchpointTracker) isWritingCatchpointDataFile() bool {
return atomic.LoadInt32(&ct.catchpointDataWriting) != 0
}
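Taken together with the prepareCommit hunk above (which stores -1) and the store of 0 in finishFirstStage, this reader completes a simple tripwire protocol around a single atomic int32. A reduced sketch of that protocol with the tracker plumbing stripped away (assuming the flag's semantics are exactly the three call sites shown in this diff):

package main

import (
	"fmt"
	"sync/atomic"
)

var catchpointDataWriting int32

func main() {
	// prepareCommit: mark a first-stage write as scheduled or in progress.
	atomic.StoreInt32(&catchpointDataWriting, -1)
	fmt.Println(atomic.LoadInt32(&catchpointDataWriting) != 0) // true: isWritingCatchpointDataFile
	// finishFirstStage: the data file is done being written.
	atomic.StoreInt32(&catchpointDataWriting, 0)
	fmt.Println(atomic.LoadInt32(&catchpointDataWriting) != 0) // false
}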
// Generates a (first stage) catchpoint data file.
-func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, accountsRound basics.Round, updatingBalancesDuration time.Duration) (totalKVs, totalAccounts, totalChunks, biggestChunkLen uint64, err error) {
+// The file is built in the following order:
+// - Catchpoint file header (named content.msgpack). The header is generated and appended to the file at the end of the
+// second stage of catchpoint generation.
+// - State proof verification data chunk (named stateProofVerificationContext.msgpack).
+// - Balance and KV chunk (named balances.x.msgpack).
+// ...
+// - Balance and KV chunk (named balances.x.msgpack).
+func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, accountsRound basics.Round, catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails) (totalKVs, totalAccounts, totalChunks, biggestChunkLen uint64, spVerificationHash crypto.Digest, err error) {
ct.log.Debugf("catchpointTracker.generateCatchpointData() writing catchpoint accounts for round %d", accountsRound)
startTime := time.Now()
- catchpointGenerationStats := telemetryspec.CatchpointGenerationEventDetails{
- BalancesWriteTime: uint64(updatingBalancesDuration.Nanoseconds()),
- }
catchpointDataFilePath := filepath.Join(ct.dbDirectory, trackerdb.CatchpointDirName)
catchpointDataFilePath =
@@ -1099,6 +1122,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
}
var catchpointWriter *catchpointWriter
+
start := time.Now()
ledgerGeneratecatchpointCount.Inc(nil)
err = ct.dbs.TransactionContext(ctx, func(dbCtx context.Context, tx trackerdb.TransactionScope) (err error) {
@@ -1106,6 +1130,12 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
if err != nil {
return
}
+
+ spVerificationHash, err = catchpointWriter.WriteStateProofVerificationContext()
+ if err != nil {
+ return
+ }
+
for more {
stepCtx, stepCancelFunction := context.WithTimeout(dbCtx, chunkExecutionDuration)
writeStepStartTime := time.Now()
@@ -1156,28 +1186,19 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
ledgerGeneratecatchpointMicros.AddMicrosecondsSince(start, nil)
if err != nil {
ct.log.Warnf("catchpointTracker.generateCatchpointData() %v", err)
- return 0, 0, 0, 0, err
+ return 0, 0, 0, 0, crypto.Digest{}, err
}
catchpointGenerationStats.FileSize = uint64(catchpointWriter.writtenBytes)
catchpointGenerationStats.WritingDuration = uint64(time.Since(startTime).Nanoseconds())
catchpointGenerationStats.AccountsCount = catchpointWriter.totalAccounts
catchpointGenerationStats.KVsCount = catchpointWriter.totalKVs
- ct.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointGenerationEvent, catchpointGenerationStats)
- ct.log.With("accountsRound", accountsRound).
- With("writingDuration", catchpointGenerationStats.WritingDuration).
- With("CPUTime", catchpointGenerationStats.CPUTime).
- With("balancesWriteTime", catchpointGenerationStats.BalancesWriteTime).
- With("accountsCount", catchpointGenerationStats.AccountsCount).
- With("kvsCount", catchpointGenerationStats.KVsCount).
- With("fileSize", catchpointGenerationStats.FileSize).
- With("catchpointLabel", catchpointGenerationStats.CatchpointLabel).
- Infof("Catchpoint data file was generated")
+ catchpointGenerationStats.AccountsRound = uint64(accountsRound)
- return catchpointWriter.totalKVs, catchpointWriter.totalAccounts, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, nil
+ return catchpointWriter.totalKVs, catchpointWriter.totalAccounts, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, spVerificationHash, nil
}
-func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx trackerdb.TransactionScope, accountsRound basics.Round, totalKVs uint64, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64) error {
+func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx trackerdb.TransactionScope, catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails, accountsRound basics.Round, totalKVs uint64, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64, stateProofVerificationHash crypto.Digest) error {
arw, err := tx.MakeAccountsReaderWriter()
if err != nil {
return err
@@ -1214,14 +1235,34 @@ func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx tracke
}
info := trackerdb.CatchpointFirstStageInfo{
- Totals: accountTotals,
- TotalAccounts: totalAccounts,
- TotalKVs: totalKVs,
- TotalChunks: totalChunks,
- BiggestChunkLen: biggestChunkLen,
- TrieBalancesHash: trieBalancesHash,
- }
- return crw.InsertOrReplaceCatchpointFirstStageInfo(ctx, accountsRound, &info)
+ Totals: accountTotals,
+ TotalAccounts: totalAccounts,
+ TotalKVs: totalKVs,
+ TotalChunks: totalChunks,
+ BiggestChunkLen: biggestChunkLen,
+ TrieBalancesHash: trieBalancesHash,
+ StateProofVerificationHash: stateProofVerificationHash,
+ }
+
+ err = crw.InsertOrReplaceCatchpointFirstStageInfo(ctx, accountsRound, &info)
+ if err != nil {
+ return err
+ }
+
+ catchpointGenerationStats.MerkleTrieRootHash = base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(trieBalancesHash[:])
+ catchpointGenerationStats.SPVerificationCtxsHash = base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(stateProofVerificationHash[:])
+ ct.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointGenerationEvent, catchpointGenerationStats)
+ ct.log.With("accountsRound", catchpointGenerationStats.AccountsRound).
+ With("writingDuration", catchpointGenerationStats.WritingDuration).
+ With("CPUTime", catchpointGenerationStats.CPUTime).
+ With("balancesWriteTime", catchpointGenerationStats.BalancesWriteTime).
+ With("accountsCount", catchpointGenerationStats.AccountsCount).
+ With("kvsCount", catchpointGenerationStats.KVsCount).
+ With("fileSize", catchpointGenerationStats.FileSize).
+ With("MerkleTrieRootHash", catchpointGenerationStats.MerkleTrieRootHash).
+ With("SPVerificationCtxsHash", catchpointGenerationStats.SPVerificationCtxsHash).
+ Infof("Catchpoint data file was generated")
+ return nil
}
func makeCatchpointDataFilePath(accountsRound basics.Round) string {
@@ -1325,7 +1366,7 @@ func (ct *catchpointTracker) GetCatchpointStream(round basics.Round) (ReadCloseS
fileInfo, err := file.Stat()
if err != nil {
// we couldn't get the stat, so just return with the file.
- return &readCloseSizer{ReadCloser: file, size: -1}, nil
+ return &readCloseSizer{ReadCloser: file, size: -1}, nil //nolint:nilerr // intentionally ignoring Stat error
}
crw, err := ct.dbs.MakeCatchpointReaderWriter()
if err != nil {
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index 0775d36da..84a103300 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -33,6 +33,7 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
@@ -42,21 +43,22 @@ import (
"github.com/algorand/go-algorand/ledger/store/trackerdb"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
-func TestIsWritingCatchpointFile(t *testing.T) {
+func TestCatchpointIsWritingCatchpointFile(t *testing.T) {
partitiontest.PartitionTest(t)
ct := &catchpointTracker{}
ct.catchpointDataWriting = -1
- ans := ct.IsWritingCatchpointDataFile()
+ ans := ct.isWritingCatchpointDataFile()
require.True(t, ans)
ct.catchpointDataWriting = 0
- ans = ct.IsWritingCatchpointDataFile()
+ ans = ct.isWritingCatchpointDataFile()
require.False(t, ans)
}
@@ -77,7 +79,7 @@ func newCatchpointTracker(tb testing.TB, l *mockLedgerForTracker, conf config.Lo
return ct
}
-func TestGetCatchpointStream(t *testing.T) {
+func TestCatchpointGetCatchpointStream(t *testing.T) {
partitiontest.PartitionTest(t)
accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
@@ -148,12 +150,12 @@ func TestGetCatchpointStream(t *testing.T) {
require.NoError(t, err)
}
-// TestAcctUpdatesDeleteStoredCatchpoints - The goal of this test is to verify that the deleteStoredCatchpoints function works correctly.
+// TestCatchpointsDeleteStored - The goal of this test is to verify that the deleteStoredCatchpoints function works correctly.
// It does so by filling up the storedcatchpoints with dummy catchpoint file entries, as well as creating these dummy files on disk.
// ( the term dummy is only because these aren't real catchpoint files, but rather zero-length files ). Then, the test calls the function
// and ensures that it did not error, the catchpoint files were correctly deleted, and that deleteStoredCatchpoints contains no more
// entries.
-func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
+func TestCatchpointsDeleteStored(t *testing.T) {
partitiontest.PartitionTest(t)
accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
@@ -205,7 +207,7 @@ func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
// The test validates that when algod boots up it cleans empty catchpoint directories.
// It is done by creating empty directories in the catchpoint root directory.
// When algod boots up it should remove those directories.
-func TestSchemaUpdateDeleteStoredCatchpoints(t *testing.T) {
+func TestCatchpointsDeleteStoredOnSchemaUpdate(t *testing.T) {
partitiontest.PartitionTest(t)
// we don't want to run this test before the binary is compiled against the latest database upgrade schema.
@@ -267,6 +269,20 @@ func getNumberOfCatchpointFilesInDir(catchpointDir string) (int, error) {
return numberOfCatchpointFiles, err
}
+func calculateStateProofVerificationHash(t *testing.T, ml *mockLedgerForTracker) crypto.Digest {
+ var digest crypto.Digest
+ err := ml.dbs.Snapshot(func(dbCtx context.Context, tx trackerdb.SnapshotScope) (err error) {
+ rawData, err := tx.MakeSpVerificationCtxReader().GetAllSPContexts(dbCtx)
+ require.NoError(t, err)
+
+ wrappedData := catchpointStateProofVerificationContext{Data: rawData}
+ digest = crypto.HashObj(wrappedData)
+ return nil
+ })
+ require.NoError(t, err)
+ return digest
+}
+
// The goal of this test is to check that we are saving at most X catchpoint files.
// If algod needs to create a new catchpoint file it will delete the oldest.
// In addition, when deleting old catchpoint files an empty directory should be deleted
@@ -297,13 +313,7 @@ func TestRecordCatchpointFile(t *testing.T) {
for _, round := range []basics.Round{2000000, 3000010, 3000015, 3000020} {
accountsRound := round - 1
-
- _, _, _, biggestChunkLen, err := ct.generateCatchpointData(
- context.Background(), accountsRound, time.Second)
- require.NoError(t, err)
-
- err = ct.createCatchpoint(context.Background(), accountsRound, round, trackerdb.CatchpointFirstStageInfo{BiggestChunkLen: biggestChunkLen}, crypto.Digest{})
- require.NoError(t, err)
+ createCatchpoint(t, ct, accountsRound, ml, round)
}
numberOfCatchpointFiles, err := getNumberOfCatchpointFilesInDir(temporaryDirectory)
@@ -317,6 +327,87 @@ func TestRecordCatchpointFile(t *testing.T) {
require.Equalf(t, onlyCatchpointDirEmpty, true, "Directories: %v", emptyDirs)
}
+func createCatchpoint(t *testing.T, ct *catchpointTracker, accountsRound basics.Round, ml *mockLedgerForTracker, round basics.Round) {
+ var catchpointGenerationStats telemetryspec.CatchpointGenerationEventDetails
+ _, _, _, biggestChunkLen, stateProofVerificationHash, err := ct.generateCatchpointData(
+ context.Background(), accountsRound, &catchpointGenerationStats)
+ require.NoError(t, err)
+
+ require.Equal(t, calculateStateProofVerificationHash(t, ml), stateProofVerificationHash)
+
+ err = ct.createCatchpoint(context.Background(), accountsRound, round, trackerdb.CatchpointFirstStageInfo{BiggestChunkLen: biggestChunkLen}, crypto.Digest{})
+ require.NoError(t, err)
+}
+
+// TestCatchpointFileWithLargeSpVerification makes sure that CatchpointFirstStageInfo.BiggestChunkLen is calculated based on state proof verification contexts
+// as well as other chunks in the catchpoint files.
+func TestCatchpointFileWithLargeSpVerification(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ temporaryDirectory := t.TempDir()
+
+ accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+ ml := makeMockLedgerForTracker(t, true, 10, protocol.ConsensusCurrentVersion, accts)
+ defer ml.Close()
+
+ ct := &catchpointTracker{}
+ conf := config.GetDefaultLocal()
+
+ conf.Archival = true
+ ct.initialize(conf, ".")
+ defer ct.close()
+ ct.dbDirectory = temporaryDirectory
+
+ _, err := trackerDBInitialize(ml, true, ct.dbDirectory)
+ require.NoError(t, err)
+
+ err = ct.loadFromDisk(ml, ml.Latest())
+ require.NoError(t, err)
+
+ // create a catchpoint with no SP verification data
+ round := basics.Round(2000000)
+ createCatchpoint(t, ct, round-1, ml, round)
+
+ numberOfCatchpointFiles, err := getNumberOfCatchpointFilesInDir(temporaryDirectory)
+ require.NoError(t, err)
+ require.Equal(t, 1, numberOfCatchpointFiles)
+ // create a catchpoint with 3 SP verification contexts
+ writeDummySpVerification(t, 0, 3, ml)
+
+ round = basics.Round(3000000)
+ createCatchpoint(t, ct, round-1, ml, round)
+
+ numberOfCatchpointFiles, err = getNumberOfCatchpointFilesInDir(temporaryDirectory)
+ require.NoError(t, err)
+ require.Equal(t, 2, numberOfCatchpointFiles)
+
+ // create a catchpoint with 500 SP verification contexts - the SP verification chunk should be the largest
+ writeDummySpVerification(t, 4, 500, ml)
+
+ round = basics.Round(4000000)
+ createCatchpoint(t, ct, round-1, ml, round)
+
+ numberOfCatchpointFiles, err = getNumberOfCatchpointFilesInDir(temporaryDirectory)
+ require.NoError(t, err)
+ require.Equal(t, 3, numberOfCatchpointFiles)
+}
+
+func writeDummySpVerification(t *testing.T, nextIndexForContext uint64, numberOfContexts uint64, ml *mockLedgerForTracker) {
+ err := ml.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
+
+ contexts := make([]*ledgercore.StateProofVerificationContext, numberOfContexts)
+ for i := uint64(0); i < numberOfContexts; i++ {
+ e := ledgercore.StateProofVerificationContext{}
+ e.LastAttestedRound = basics.Round(nextIndexForContext + i)
+ contexts[i] = &e
+ }
+ writer := tx.MakeSpVerificationCtxReaderWriter()
+
+ return writer.StoreSPContexts(ctx, contexts[:])
+ })
+ require.NoError(t, err)
+}
+
func BenchmarkLargeCatchpointDataWriting(b *testing.B) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
@@ -370,13 +461,14 @@ func BenchmarkLargeCatchpointDataWriting(b *testing.B) {
})
require.NoError(b, err)
+ var catchpointGenerationStats telemetryspec.CatchpointGenerationEventDetails
b.ResetTimer()
- ct.generateCatchpointData(context.Background(), basics.Round(0), time.Second)
+ ct.generateCatchpointData(context.Background(), basics.Round(0), &catchpointGenerationStats)
b.StopTimer()
b.ReportMetric(float64(accountsNumber), "accounts")
}
-func TestReproducibleCatchpointLabels(t *testing.T) {
+func TestCatchpointReproducibleLabels(t *testing.T) {
partitiontest.PartitionTest(t)
if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
@@ -387,7 +479,7 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestReproducibleCatchpointLabels")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
protoParams.CatchpointLookback = 32
- protoParams.EnableOnlineAccountCatchpoints = true
+ protoParams.EnableCatchpointsWithSPContexts = true
config.Consensus[testProtocolVersion] = protoParams
defer func() {
delete(config.Consensus, testProtocolVersion)
@@ -401,6 +493,7 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
defer ml.Close()
cfg := config.GetDefaultLocal()
+ cfg.MaxAcctLookback = 2
cfg.CatchpointInterval = 50
cfg.CatchpointTracking = 1
ct := newCatchpointTracker(t, ml, cfg, ".")
@@ -417,10 +510,19 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
catchpointLabels := make(map[basics.Round]string)
ledgerHistory := make(map[basics.Round]*mockLedgerForTracker)
roundDeltas := make(map[basics.Round]ledgercore.StateDelta)
- numCatchpointsCreated := 0
+
+ isCatchpointRound := func(rnd basics.Round) bool {
+ return (uint64(rnd) >= cfg.MaxAcctLookback) &&
+ (uint64(rnd)-cfg.MaxAcctLookback > protoParams.CatchpointLookback) &&
+ ((uint64(rnd)-cfg.MaxAcctLookback)%cfg.CatchpointInterval == 0)
+ }
+ isDataFileRound := func(rnd basics.Round) bool {
+ return ((uint64(rnd)-cfg.MaxAcctLookback+protoParams.CatchpointLookback)%cfg.CatchpointInterval == 0)
+ }
+
i := basics.Round(0)
+ numCatchpointsCreated := 0
lastCatchpointLabel := ""
-
for numCatchpointsCreated < testCatchpointLabelsCount {
i++
rewardLevelDelta := crypto.RandUint64() % 5
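With the values this test uses (MaxAcctLookback=2, CatchpointInterval=50, CatchpointLookback=32), the two predicates fire at offset rounds: data files are produced CatchpointLookback rounds before the catchpoints they feed. A standalone sketch checking the first such pair (the arithmetic here is mine, derived from the predicates above):

package main

import "fmt"

func main() {
	const maxAcctLookback, catchpointInterval, catchpointLookback uint64 = 2, 50, 32
	isCatchpointRound := func(rnd uint64) bool {
		return rnd >= maxAcctLookback &&
			rnd-maxAcctLookback > catchpointLookback &&
			(rnd-maxAcctLookback)%catchpointInterval == 0
	}
	isDataFileRound := func(rnd uint64) bool {
		return (rnd-maxAcctLookback+catchpointLookback)%catchpointInterval == 0
	}
	// First data-file round is 20; it feeds the first catchpoint at round 52 = 20 + 32.
	fmt.Println(isDataFileRound(20), isCatchpointRound(52)) // true true
}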
@@ -456,41 +558,45 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables)
delta.Totals = newTotals
- ml.trackers.newBlock(blk, delta)
- ml.trackers.committedUpTo(i)
- ml.addMockBlock(blockEntry{block: blk}, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
roundDeltas[i] = delta
- // If we made a catchpoint, save the label.
- if (uint64(i) >= cfg.MaxAcctLookback) && (uint64(i)-cfg.MaxAcctLookback > protoParams.CatchpointLookback) && ((uint64(i)-cfg.MaxAcctLookback)%cfg.CatchpointInterval == 0) {
+ // determine if there is a data file round and commit
+ if isDataFileRound(i) || isCatchpointRound(i) {
+ ml.trackers.committedUpTo(i)
ml.trackers.waitAccountsWriting()
+
+ // Let catchpoint data generation finish so that nothing gets skipped.
+ for ct.isWritingCatchpointDataFile() {
+ time.Sleep(time.Millisecond)
+ }
+ }
+
+ // If we made a catchpoint, save the label.
+ if isCatchpointRound(i) {
catchpointLabels[i] = ct.GetLastCatchpointLabel()
+ require.NotEmpty(t, catchpointLabels[i])
require.NotEqual(t, lastCatchpointLabel, catchpointLabels[i])
lastCatchpointLabel = catchpointLabels[i]
ledgerHistory[i] = ml.fork(t)
defer ledgerHistory[i].Close()
numCatchpointsCreated++
}
-
- // Let catchpoint data generation finish so that nothing gets skipped.
- for ct.IsWritingCatchpointDataFile() {
- time.Sleep(time.Millisecond)
- }
}
lastRound := i
// Test in reverse what happens when we try to repeat the exact same blocks.
// Start off with the catchpoint before the last one.
- for startingRound := lastRound - basics.Round(cfg.CatchpointInterval); uint64(startingRound) > protoParams.CatchpointLookback; startingRound -= basics.Round(cfg.CatchpointInterval) {
+ for rnd := lastRound - basics.Round(cfg.CatchpointInterval); uint64(rnd) > protoParams.CatchpointLookback; rnd -= basics.Round(cfg.CatchpointInterval) {
au.close()
- ml2 := ledgerHistory[startingRound]
+ ml2 := ledgerHistory[rnd]
require.NotNil(t, ml2)
ct2 := newCatchpointTracker(t, ml2, cfg, ".")
defer ct2.close()
- for i := startingRound + 1; i <= lastRound; i++ {
+ for i := rnd + 1; i <= lastRound; i++ {
blk := bookkeeping.Block{
BlockHeader: bookkeeping.BlockHeader{
Round: basics.Round(i),
@@ -500,18 +606,19 @@ func TestReproducibleCatchpointLabels(t *testing.T) {
blk.CurrentProtocol = testProtocolVersion
delta := roundDeltas[i]
- ml2.trackers.newBlock(blk, delta)
- ml2.trackers.committedUpTo(i)
+ ml2.addBlock(blockEntry{block: blk}, delta)
- // if this is a catchpoint round, check the label.
- if (uint64(i) >= cfg.MaxAcctLookback) && (uint64(i)-cfg.MaxAcctLookback > protoParams.CatchpointLookback) && ((uint64(i)-cfg.MaxAcctLookback)%cfg.CatchpointInterval == 0) {
+ if isDataFileRound(i) || isCatchpointRound(i) {
+ ml2.trackers.committedUpTo(i)
ml2.trackers.waitAccountsWriting()
- require.Equal(t, catchpointLabels[i], ct2.GetLastCatchpointLabel())
+ // Let catchpoint data generation finish so that nothing gets skipped.
+ for ct.isWritingCatchpointDataFile() {
+ time.Sleep(time.Millisecond)
+ }
}
-
- // Let catchpoint data generation finish so that nothing gets skipped.
- for ct.IsWritingCatchpointDataFile() {
- time.Sleep(time.Millisecond)
+ // if this is a catchpoint round, check the label.
+ if isCatchpointRound(i) {
+ require.Equal(t, catchpointLabels[i], ct2.GetLastCatchpointLabel())
}
}
}
@@ -538,6 +645,7 @@ type blockingTracker struct {
committedUpToRound int64
alwaysLock bool
shouldLockPostCommit bool
+ shouldLockPostCommitUnlocked bool
}
// loadFromDisk is not implemented in the blockingTracker.
@@ -580,14 +688,14 @@ func (bt *blockingTracker) postCommit(ctx context.Context, dcc *deferredCommitCo
// postCommitUnlocked implements entry/exit blockers, designed for testing.
func (bt *blockingTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
- if bt.alwaysLock || dcc.catchpointFirstStage {
+ if bt.alwaysLock || dcc.catchpointFirstStage || bt.shouldLockPostCommitUnlocked {
bt.postCommitUnlockedEntryLock <- struct{}{}
<-bt.postCommitUnlockedReleaseLock
}
}
-// handleUnorderedCommit is not used by the blockingTracker
-func (bt *blockingTracker) handleUnorderedCommit(*deferredCommitContext) {
+// handleUnorderedCommitOrError is not used by the blockingTracker
+func (bt *blockingTracker) handleUnorderedCommitOrError(*deferredCommitContext) {
}
// close is not used by the blockingTracker
@@ -597,9 +705,9 @@ func (bt *blockingTracker) close() {
func TestCatchpointTrackerNonblockingCatchpointWriting(t *testing.T) {
partitiontest.PartitionTest(t)
- testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestReproducibleCatchpointLabels")
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestCatchpointTrackerNonblockingCatchpointWriting")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
- protoParams.EnableOnlineAccountCatchpoints = true
+ protoParams.EnableCatchpointsWithSPContexts = true
protoParams.CatchpointLookback = protoParams.MaxBalLookback
config.Consensus[testProtocolVersion] = protoParams
defer func() {
@@ -740,6 +848,94 @@ func TestCatchpointTrackerNonblockingCatchpointWriting(t *testing.T) {
}
}
+// TestCatchpointTrackerWaitNotBlocking checks that a tracker with a long-running postCommitUnlocked does not block the blockq (notifyCommit) goroutine
+func TestCatchpointTrackerWaitNotBlocking(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 10)
+ const inMem = true
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Warn)
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ ledger, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer ledger.Close()
+
+ writeStallingTracker := &blockingTracker{
+ postCommitUnlockedEntryLock: make(chan struct{}),
+ postCommitUnlockedReleaseLock: make(chan struct{}),
+ shouldLockPostCommitUnlocked: true,
+ }
+ ledger.trackerMu.Lock()
+ ledger.trackers.mu.Lock()
+ ledger.trackers.trackers = append(ledger.trackers.trackers, writeStallingTracker)
+ ledger.trackers.mu.Unlock()
+ ledger.trackerMu.Unlock()
+
+ startRound := ledger.Latest() + 1
+ endRound := basics.Round(20)
+ addBlockDone := make(chan struct{})
+
+ // release the blocking tracker when the test is done
+ defer func() {
+ // unblocking from another goroutine is a bit complicated:
+ // this function must not return until postCommitUnlockedReleaseLock is consumed.
+ // To do that, write to it first and do not exit until the write is consumed;
+ // otherwise we might exit and leave the tracker registry's syncer goroutine blocked.
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ writeStallingTracker.postCommitUnlockedReleaseLock <- struct{}{}
+ wg.Done()
+ }()
+
+ // consume to unblock
+ <-writeStallingTracker.postCommitUnlockedEntryLock
+ // disable further blocking
+ writeStallingTracker.shouldLockPostCommitUnlocked = false
+
+ // wait until the write to writeStallingTracker.postCommitUnlockedReleaseLock completes
+ wg.Wait()
+
+ // at the end, wait until the addBlock goroutine finishes
+ // consume to unblock
+ <-addBlockDone
+ }()
+
+ // tracker commits are now blocked, add some blocks
+ timer := time.NewTimer(1 * time.Second)
+ go func() {
+ defer close(addBlockDone)
+ blk := genesisInitState.Block
+ for rnd := startRound; rnd <= endRound; rnd++ {
+ blk.BlockHeader.Round = rnd
+ blk.BlockHeader.TimeStamp = int64(blk.BlockHeader.Round)
+ err := ledger.AddBlock(blk, agreement.Certificate{})
+ require.NoError(t, err)
+ }
+ }()
+
+ select {
+ case <-timer.C:
+ require.FailNow(t, "timeout")
+ case <-addBlockDone:
+ }
+
+ // switch context one more time to give the blockqueue syncer a chance to run
+ time.Sleep(1 * time.Millisecond)
+
+ // ensure Ledger.Wait() does not block for any round except possibly the last one (due to possible races)
+ for rnd := startRound; rnd < endRound; rnd++ {
+ done := ledger.Wait(rnd)
+ select {
+ case <-done:
+ default:
+ require.FailNow(t, fmt.Sprintf("Wait(%d) is blocked", rnd))
+ }
+ }
+}
+
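The readiness check at the end relies on select with a default case: receiving from the channel returned by Ledger.Wait succeeds immediately only if the round is already committed. The pattern in isolation (a standalone sketch; the closed channel stands in for what Wait returns once a round is committed):

package main

import "fmt"

func main() {
	done := make(chan struct{})
	close(done) // stand-in for the channel Ledger.Wait returns for an already-committed round
	select {
	case <-done:
		fmt.Println("round committed; Wait would not block")
	default:
		fmt.Println("Wait would block")
	}
}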
func TestCalculateFirstStageRounds(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -827,7 +1023,7 @@ func TestCalculateCatchpointRounds(t *testing.T) {
// Test that pruning first stage catchpoint database records and catchpoint data files
// works.
-func TestFirstStageInfoPruning(t *testing.T) {
+func TestCatchpointFirstStageInfoPruning(t *testing.T) {
partitiontest.PartitionTest(t)
// create new protocol version, which has lower lookback
@@ -835,7 +1031,7 @@ func TestFirstStageInfoPruning(t *testing.T) {
protocol.ConsensusVersion("test-protocol-TestFirstStageInfoPruning")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
protoParams.CatchpointLookback = 32
- protoParams.EnableOnlineAccountCatchpoints = true
+ protoParams.EnableCatchpointsWithSPContexts = true
config.Consensus[testProtocolVersion] = protoParams
defer func() {
delete(config.Consensus, testProtocolVersion)
@@ -861,6 +1057,15 @@ func TestFirstStageInfoPruning(t *testing.T) {
expectedNumEntries := protoParams.CatchpointLookback / cfg.CatchpointInterval
+ isCatchpointRound := func(rnd basics.Round) bool {
+ return (uint64(rnd) >= cfg.MaxAcctLookback) &&
+ (uint64(rnd)-cfg.MaxAcctLookback > protoParams.CatchpointLookback) &&
+ ((uint64(rnd)-cfg.MaxAcctLookback)%cfg.CatchpointInterval == 0)
+ }
+ isDataFileRound := func(rnd basics.Round) bool {
+ return ((uint64(rnd)-cfg.MaxAcctLookback+protoParams.CatchpointLookback)%cfg.CatchpointInterval == 0)
+ }
+
numCatchpointsCreated := uint64(0)
i := basics.Round(0)
lastCatchpointLabel := ""
@@ -878,22 +1083,23 @@ func TestFirstStageInfoPruning(t *testing.T) {
}
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 0, 0)
- ml.trackers.newBlock(blk, delta)
- ml.trackers.committedUpTo(i)
- ml.addMockBlock(blockEntry{block: blk}, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
- if (uint64(i) >= cfg.MaxAcctLookback) && (uint64(i)-cfg.MaxAcctLookback > protoParams.CatchpointLookback) && ((uint64(i)-cfg.MaxAcctLookback)%cfg.CatchpointInterval == 0) {
+ if isDataFileRound(i) || isCatchpointRound(i) {
+ ml.trackers.committedUpTo(i)
ml.trackers.waitAccountsWriting()
+ // Let catchpoint data generation finish so that nothing gets skipped.
+ for ct.isWritingCatchpointDataFile() {
+ time.Sleep(time.Millisecond)
+ }
+ }
+
+ if isCatchpointRound(i) {
catchpointLabel := ct.GetLastCatchpointLabel()
require.NotEqual(t, lastCatchpointLabel, catchpointLabel)
lastCatchpointLabel = catchpointLabel
numCatchpointsCreated++
}
-
- // Let catchpoint data generation finish so that nothing gets skipped.
- for ct.IsWritingCatchpointDataFile() {
- time.Sleep(time.Millisecond)
- }
}
numEntries := uint64(0)
@@ -921,7 +1127,7 @@ func TestFirstStageInfoPruning(t *testing.T) {
// Test that on startup the catchpoint tracker restarts catchpoint's first stage if
// there is an unfinished first stage record in the database.
-func TestFirstStagePersistence(t *testing.T) {
+func TestCatchpointFirstStagePersistence(t *testing.T) {
partitiontest.PartitionTest(t)
// create new protocol version, which has lower lookback
@@ -929,7 +1135,7 @@ func TestFirstStagePersistence(t *testing.T) {
protocol.ConsensusVersion("test-protocol-TestFirstStagePersistence")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
protoParams.CatchpointLookback = 32
- protoParams.EnableOnlineAccountCatchpoints = true
+ protoParams.EnableCatchpointsWithSPContexts = true
config.Consensus[testProtocolVersion] = protoParams
defer func() {
delete(config.Consensus, testProtocolVersion)
@@ -964,11 +1170,9 @@ func TestFirstStagePersistence(t *testing.T) {
}
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 0, 0)
- ml.trackers.newBlock(blk, delta)
- ml.trackers.committedUpTo(i)
- ml.addMockBlock(blockEntry{block: blk}, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
}
-
+ ml.trackers.committedUpTo(firstStageRound)
ml.trackers.waitAccountsWriting()
// Check that the data file exists.
@@ -1024,7 +1228,7 @@ func TestFirstStagePersistence(t *testing.T) {
// Test that on startup the catchpoint tracker restarts catchpoint's second stage if
// there is an unfinished catchpoint record in the database.
-func TestSecondStagePersistence(t *testing.T) {
+func TestCatchpointSecondStagePersistence(t *testing.T) {
partitiontest.PartitionTest(t)
// create new protocol version, which has lower lookback
@@ -1032,7 +1236,7 @@ func TestSecondStagePersistence(t *testing.T) {
protocol.ConsensusVersion("test-protocol-TestFirstStagePersistence")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
protoParams.CatchpointLookback = 32
- protoParams.EnableOnlineAccountCatchpoints = true
+ protoParams.EnableCatchpointsWithSPContexts = true
config.Consensus[testProtocolVersion] = protoParams
defer func() {
delete(config.Consensus, testProtocolVersion)
@@ -1054,6 +1258,15 @@ func TestSecondStagePersistence(t *testing.T) {
t, ml, cfg, filepath.Join(tempDirectory, config.LedgerFilenamePrefix))
defer ct.close()
+ isCatchpointRound := func(rnd basics.Round) bool {
+ return (uint64(rnd) >= cfg.MaxAcctLookback) &&
+ (uint64(rnd)-cfg.MaxAcctLookback > protoParams.CatchpointLookback) &&
+ ((uint64(rnd)-cfg.MaxAcctLookback)%cfg.CatchpointInterval == 0)
+ }
+ isDataFileRound := func(rnd basics.Round) bool {
+ return ((uint64(rnd)-cfg.MaxAcctLookback+protoParams.CatchpointLookback)%cfg.CatchpointInterval == 0)
+ }
+
secondStageRound := basics.Round(36)
firstStageRound := secondStageRound - basics.Round(protoParams.CatchpointLookback)
catchpointDataFilePath :=
@@ -1085,18 +1298,18 @@ func TestSecondStagePersistence(t *testing.T) {
}
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 0, 0)
- ml.trackers.newBlock(blk, delta)
- ml.trackers.committedUpTo(i)
- ml.addMockBlock(blockEntry{block: blk}, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
- // Let catchpoint data generation finish so that nothing gets skipped.
- for ct.IsWritingCatchpointDataFile() {
- time.Sleep(time.Millisecond)
+ if isDataFileRound(i) || isCatchpointRound(i) {
+ ml.trackers.committedUpTo(i)
+ ml.trackers.waitAccountsWriting()
+ // Let catchpoint data generation finish so that nothing gets skipped.
+ for ct.isWritingCatchpointDataFile() {
+ time.Sleep(time.Millisecond)
+ }
}
}
- ml.trackers.waitAccountsWriting()
-
// Check that the data file exists.
catchpointFilePath :=
filepath.Join(catchpointsDirectory, trackerdb.MakeCatchpointFilePath(secondStageRound))
@@ -1161,7 +1374,7 @@ func TestSecondStagePersistence(t *testing.T) {
// Test that when catchpoint's first stage record is unavailable
// (e.g. catchpoints were disabled at first stage), the unfinished catchpoint
// database record is deleted.
-func TestSecondStageDeletesUnfinishedCatchpointRecord(t *testing.T) {
+func TestCatchpointSecondStageDeletesUnfinishedCatchpointRecord(t *testing.T) {
partitiontest.PartitionTest(t)
// create new protocol version, which has lower lookback
@@ -1169,7 +1382,7 @@ func TestSecondStageDeletesUnfinishedCatchpointRecord(t *testing.T) {
protocol.ConsensusVersion("test-protocol-TestFirstStagePersistence")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
protoParams.CatchpointLookback = 32
- protoParams.EnableOnlineAccountCatchpoints = true
+ protoParams.EnableCatchpointsWithSPContexts = true
config.Consensus[testProtocolVersion] = protoParams
defer func() {
delete(config.Consensus, testProtocolVersion)
@@ -1206,7 +1419,7 @@ func TestSecondStageDeletesUnfinishedCatchpointRecord(t *testing.T) {
ml.trackers.newBlock(blk, delta)
ml.trackers.committedUpTo(i)
- ml.addMockBlock(blockEntry{block: blk}, delta)
+ ml.addToBlockQueue(blockEntry{block: blk}, delta)
}
ml.trackers.waitAccountsWriting()
@@ -1237,7 +1450,7 @@ func TestSecondStageDeletesUnfinishedCatchpointRecord(t *testing.T) {
ml2.trackers.newBlock(blk, delta)
ml2.trackers.committedUpTo(secondStageRound)
- ml2.addMockBlock(blockEntry{block: blk}, delta)
+ ml2.addToBlockQueue(blockEntry{block: blk}, delta)
}
ml2.trackers.waitAccountsWriting()
@@ -1250,7 +1463,7 @@ func TestSecondStageDeletesUnfinishedCatchpointRecord(t *testing.T) {
// Test that on startup the catchpoint tracker deletes the unfinished catchpoint
// database record when the first stage database record is missing.
-func TestSecondStageDeletesUnfinishedCatchpointRecordAfterRestart(t *testing.T) {
+func TestCatchpointSecondStageDeletesUnfinishedCatchpointRecordAfterRestart(t *testing.T) {
partitiontest.PartitionTest(t)
// create new protocol version, which has lower lookback
@@ -1258,7 +1471,7 @@ func TestSecondStageDeletesUnfinishedCatchpointRecordAfterRestart(t *testing.T)
protocol.ConsensusVersion("test-protocol-TestFirstStagePersistence")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
protoParams.CatchpointLookback = 32
- protoParams.EnableOnlineAccountCatchpoints = true
+ protoParams.EnableCatchpointsWithSPContexts = true
config.Consensus[testProtocolVersion] = protoParams
defer func() {
delete(config.Consensus, testProtocolVersion)
@@ -1293,10 +1506,10 @@ func TestSecondStageDeletesUnfinishedCatchpointRecordAfterRestart(t *testing.T)
ml.trackers.newBlock(blk, delta)
ml.trackers.committedUpTo(i)
- ml.addMockBlock(blockEntry{block: blk}, delta)
+ ml.addToBlockQueue(blockEntry{block: blk}, delta)
// Let catchpoint data generation finish so that nothing gets skipped.
- for ct.IsWritingCatchpointDataFile() {
+ for ct.isWritingCatchpointDataFile() {
time.Sleep(time.Millisecond)
}
}
@@ -1480,7 +1693,7 @@ func TestCatchpointFastUpdates(t *testing.T) {
t.Skip("This test is too slow on ARM and causes CI builds to time out")
}
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ proto := config.Consensus[protocol.ConsensusFuture]
accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
addSinkAndPoolAccounts(accts)
@@ -1490,7 +1703,7 @@ func TestCatchpointFastUpdates(t *testing.T) {
conf.CatchpointInterval = 1
conf.CatchpointTracking = 1
initialBlocksCount := int(conf.MaxAcctLookback)
- ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusCurrentVersion, accts)
+ ml := makeMockLedgerForTracker(t, true, initialBlocksCount, protocol.ConsensusFuture, accts)
defer ml.Close()
ct := newCatchpointTracker(t, ml, conf, ".")
@@ -1539,11 +1752,12 @@ func TestCatchpointFastUpdates(t *testing.T) {
},
}
blk.RewardsLevel = rewardLevel
- blk.CurrentProtocol = protocol.ConsensusCurrentVersion
+ blk.CurrentProtocol = protocol.ConsensusFuture
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
- ml.trackers.newBlock(blk, delta)
+ delta.Totals = accumulateTotals(t, protocol.ConsensusCurrentVersion, []map[basics.Address]ledgercore.AccountData{totals}, rewardLevel)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
@@ -1553,8 +1767,8 @@ func TestCatchpointFastUpdates(t *testing.T) {
ml.trackers.committedUpTo(round)
}(i)
}
- ml.trackers.waitAccountsWriting()
wg.Wait()
+ ml.trackers.waitAccountsWriting()
require.NotEmpty(t, ct.GetLastCatchpointLabel())
}
@@ -1577,6 +1791,7 @@ func TestCatchpointLargeAccountCountCatchpointGeneration(t *testing.T) {
testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestLargeAccountCountCatchpointGeneration")
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
protoParams.CatchpointLookback = 16
+ protoParams.EnableCatchpointsWithSPContexts = true
config.Consensus[testProtocolVersion] = protoParams
defer func() {
delete(config.Consensus, testProtocolVersion)
@@ -1643,7 +1858,7 @@ func TestCatchpointLargeAccountCountCatchpointGeneration(t *testing.T) {
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
- ml.trackers.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
accts = append(accts, newAccts)
rewardsLevels = append(rewardsLevels, rewardLevel)
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index f912fbf6f..df8b72ba6 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -24,7 +24,9 @@ import (
"os"
"path/filepath"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/ledger/encoded"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store/trackerdb"
"github.com/algorand/go-algorand/protocol"
)
@@ -38,6 +40,11 @@ const (
// 100,000 resources * 20KB/resource => roughly max 2GB per chunk if all of them are max'ed out apps.
// In reality most entries are asset holdings, and they are very small.
ResourcesPerCatchpointFileChunk = 100_000
+
+ // SPContextPerCatchpointFile defines the maximum number of state proof verification contexts stored
+ // in the catchpoint file.
+ // (2 years * 31536000 seconds per year) / (256 rounds per context * 3.6 seconds per round) ~= 70000
+ SPContextPerCatchpointFile = 70000
)
// catchpointWriter is the struct managing the persistence of accounts data into the catchpoint file.
@@ -91,6 +98,15 @@ func (chunk catchpointFileChunkV6) empty() bool {
return len(chunk.Balances) == 0 && len(chunk.KVs) == 0
}
+type catchpointStateProofVerificationContext struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+ Data []ledgercore.StateProofVerificationContext `codec:"spd,allocbound=SPContextPerCatchpointFile"`
+}
+
+func (data catchpointStateProofVerificationContext) ToBeHashed() (protocol.HashID, []byte) {
+ return protocol.StateProofVerCtx, protocol.Encode(&data)
+}
+
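Implementing ToBeHashed makes the wrapper a crypto.Hashable, so crypto.HashObj (used by the tests above to recompute the digest) and crypto.EncodeAndHash (used by WriteStateProofVerificationContext below) both hash the protocol.StateProofVerCtx domain-separation tag followed by the msgpack encoding. A sketch of the expected agreement, written as if inside package ledger (hedged: assumes EncodeAndHash returns the same digest HashObj computes, which the two call sites in this diff rely on):

func spVerificationDigestSketch() {
	wrapped := catchpointStateProofVerificationContext{} // empty context list
	dataHash, encoded := crypto.EncodeAndHash(wrapped)
	// Both digests cover HashID || encoding, so they should agree.
	fmt.Println(dataHash == crypto.HashObj(wrapped), len(encoded) > 0)
}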
func makeCatchpointWriter(ctx context.Context, filePath string, tx trackerdb.TransactionScope, maxResourcesPerChunk int) (*catchpointWriter, error) {
arw, err := tx.MakeAccountsReaderWriter()
if err != nil {
@@ -144,6 +160,37 @@ func (cw *catchpointWriter) Abort() error {
return os.Remove(cw.filePath)
}
+func (cw *catchpointWriter) WriteStateProofVerificationContext() (crypto.Digest, error) {
+ rawData, err := cw.tx.MakeSpVerificationCtxReaderWriter().GetAllSPContexts(cw.ctx)
+ if err != nil {
+ return crypto.Digest{}, err
+ }
+
+ wrappedData := catchpointStateProofVerificationContext{Data: rawData}
+ dataHash, encodedData := crypto.EncodeAndHash(wrappedData)
+
+ err = cw.tar.WriteHeader(&tar.Header{
+ Name: catchpointSPVerificationFileName,
+ Mode: 0600,
+ Size: int64(len(encodedData)),
+ })
+
+ if err != nil {
+ return crypto.Digest{}, err
+ }
+
+ _, err = cw.tar.Write(encodedData)
+ if err != nil {
+ return crypto.Digest{}, err
+ }
+
+ if chunkLen := uint64(len(encodedData)); cw.biggestChunkLen < chunkLen {
+ cw.biggestChunkLen = chunkLen
+ }
+
+ return dataHash, nil
+}
+
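Reading the entry back is the mirror image: scan the tar stream for catchpointSPVerificationFileName and decode the payload, much as the test helpers in catchpointwriter_test.go do below. A minimal sketch, written as if inside package ledger (assumes r wraps the decompressed first-stage file, e.g. via catchpointStage1Decoder, with archive/tar, io, and protocol imported):

func readSPVerificationChunk(r io.Reader) (catchpointStateProofVerificationContext, error) {
	var wrapped catchpointStateProofVerificationContext
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err != nil {
			return wrapped, err // io.EOF means the entry was not found
		}
		if hdr.Name != catchpointSPVerificationFileName {
			continue
		}
		data := make([]byte, hdr.Size)
		if _, err := io.ReadFull(tr, data); err != nil {
			return wrapped, err
		}
		return wrapped, protocol.Decode(data, &wrapped)
	}
}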
// WriteStep works for a short period of time (determined by stepCtx) to get
// some more data (accounts/resources/kvpairs) by using readDatabaseStep, and
// write that data to the open tar file in cw.tar. The writing is done in
@@ -247,7 +294,7 @@ func (cw *catchpointWriter) asyncWriter(chunks chan catchpointFileChunkV6, respo
}
encodedChunk := protocol.Encode(&chk)
err := cw.tar.WriteHeader(&tar.Header{
- Name: fmt.Sprintf("balances.%d.msgpack", chunkNum),
+ Name: fmt.Sprintf(catchpointBalancesFileNameTemplate, chunkNum),
Mode: 0600,
Size: int64(len(encodedChunk)),
})
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index 5fd0518bb..db54b4527 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -49,6 +49,133 @@ import (
"github.com/algorand/msgp/msgp"
)
+type decodedCatchpointChunkData struct {
+ headerName string
+ data []byte
+}
+
+func readCatchpointContent(t *testing.T, tarReader *tar.Reader) []decodedCatchpointChunkData {
+ result := make([]decodedCatchpointChunkData, 0)
+ for {
+ header, err := tarReader.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ require.NoError(t, err)
+ break
+ }
+ data := make([]byte, header.Size)
+ readComplete := int64(0)
+
+ for readComplete < header.Size {
+ bytesRead, err := tarReader.Read(data[readComplete:])
+ readComplete += int64(bytesRead)
+ if err != nil {
+ if err == io.EOF {
+ if readComplete == header.Size {
+ break
+ }
+ require.NoError(t, err)
+ }
+ break
+ }
+ }
+
+ result = append(result, decodedCatchpointChunkData{headerName: header.Name, data: data})
+ }
+
+ return result
+}
+
+func readCatchpointDataFile(t *testing.T, catchpointDataPath string) []decodedCatchpointChunkData {
+ fileContent, err := os.ReadFile(catchpointDataPath)
+ require.NoError(t, err)
+
+ compressorReader, err := catchpointStage1Decoder(bytes.NewBuffer(fileContent))
+ require.NoError(t, err)
+
+ tarReader := tar.NewReader(compressorReader)
+ return readCatchpointContent(t, tarReader)
+}
+
+func readCatchpointFile(t *testing.T, catchpointPath string) []decodedCatchpointChunkData {
+ fileContent, err := os.ReadFile(catchpointPath)
+ require.NoError(t, err)
+
+ gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
+ require.NoError(t, err)
+ defer gzipReader.Close()
+
+ tarReader := tar.NewReader(gzipReader)
+ return readCatchpointContent(t, tarReader)
+}
+
+func verifyStateProofVerificationContextWrite(t *testing.T, data []ledgercore.StateProofVerificationContext) {
+ // create new protocol version, which has lower lookback
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestBasicCatchpointWriter")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ config.Consensus[testProtocolVersion] = protoParams
+ temporaryDirectory := t.TempDir()
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+ accts := ledgertesting.RandomAccounts(300, false)
+
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ conf.Archival = true
+ au, _ := newAcctUpdates(t, ml, conf)
+ err := au.loadFromDisk(ml, 0)
+ require.NoError(t, err)
+ au.close()
+ fileName := filepath.Join(temporaryDirectory, "15.data")
+
+ mockCommitData := make([]verificationCommitContext, 0)
+ for _, element := range data {
+ mockCommitData = append(mockCommitData, verificationCommitContext{verificationContext: element})
+ }
+
+ err = ml.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error {
+ return commitSPContexts(ctx, tx, mockCommitData)
+ })
+
+ require.NoError(t, err)
+
+ err = ml.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
+ writer, err := makeCatchpointWriter(context.Background(), fileName, tx, ResourcesPerCatchpointFileChunk)
+ if err != nil {
+ return err
+ }
+ _, err = writer.WriteStateProofVerificationContext()
+ if err != nil {
+ return err
+ }
+ for {
+ more, err := writer.WriteStep(context.Background())
+ require.NoError(t, err)
+ if !more {
+ break
+ }
+ }
+ return
+ })
+
+ catchpointData := readCatchpointDataFile(t, fileName)
+ require.Equal(t, catchpointSPVerificationFileName, catchpointData[0].headerName)
+ var wrappedData catchpointStateProofVerificationContext
+ err = protocol.Decode(catchpointData[0].data, &wrappedData)
+ require.NoError(t, err)
+
+ for index, verificationContext := range wrappedData.Data {
+ require.Equal(t, data[index], verificationContext)
+ }
+}
+
func TestCatchpointFileBalancesChunkEncoding(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -133,6 +260,10 @@ func TestBasicCatchpointWriter(t *testing.T) {
if err != nil {
return err
}
+ _, err = writer.WriteStateProofVerificationContext()
+ if err != nil {
+ return err
+ }
for {
more, err := writer.WriteStep(context.Background())
require.NoError(t, err)
@@ -142,45 +273,15 @@ func TestBasicCatchpointWriter(t *testing.T) {
}
return
})
- require.NoError(t, err)
- // load the file from disk.
- fileContent, err := os.ReadFile(fileName)
- require.NoError(t, err)
- compressorReader, err := catchpointStage1Decoder(bytes.NewBuffer(fileContent))
- require.NoError(t, err)
- defer compressorReader.Close()
- tarReader := tar.NewReader(compressorReader)
-
- header, err := tarReader.Next()
- require.NoError(t, err)
-
- balancesBlockBytes := make([]byte, header.Size)
- readComplete := int64(0)
-
- for readComplete < header.Size {
- bytesRead, err := tarReader.Read(balancesBlockBytes[readComplete:])
- readComplete += int64(bytesRead)
- if err != nil {
- if err == io.EOF {
- if readComplete == header.Size {
- break
- }
- require.NoError(t, err)
- }
- break
- }
- }
-
- require.Equal(t, "balances.1.msgpack", header.Name)
+ catchpointContent := readCatchpointDataFile(t, fileName)
+ balanceFileName := fmt.Sprintf(catchpointBalancesFileNameTemplate, 1)
+ require.Equal(t, balanceFileName, catchpointContent[1].headerName)
var chunk catchpointFileChunkV6
- err = protocol.Decode(balancesBlockBytes, &chunk)
+ err = protocol.Decode(catchpointContent[1].data, &chunk)
require.NoError(t, err)
require.Equal(t, uint64(len(accts)), uint64(len(chunk.Balances)))
-
- _, err = tarReader.Next()
- require.Equal(t, io.EOF, err)
}
func testWriteCatchpoint(t *testing.T, rdb trackerdb.TrackerStore, datapath string, filepath string, maxResourcesPerChunk int) CatchpointFileHeader {
@@ -203,7 +304,10 @@ func testWriteCatchpoint(t *testing.T, rdb trackerdb.TrackerStore, datapath stri
if err != nil {
return err
}
-
+ _, err = writer.WriteStateProofVerificationContext()
+ if err != nil {
+ return err
+ }
for {
more, err := writer.WriteStep(context.Background())
require.NoError(t, err)
@@ -226,7 +330,7 @@ func testWriteCatchpoint(t *testing.T, rdb trackerdb.TrackerStore, datapath stri
blockHeaderDigest := crypto.Hash([]byte{1, 2, 3})
catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test
catchpointFileHeader := CatchpointFileHeader{
- Version: CatchpointFileVersionV6,
+ Version: CatchpointFileVersionV7,
BalancesRound: accountsRnd,
BlocksRound: blocksRound,
Totals: totals,
@@ -246,6 +350,26 @@ func testWriteCatchpoint(t *testing.T, rdb trackerdb.TrackerStore, datapath stri
return catchpointFileHeader
}
+func TestStateProofVerificationContextWrite(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // t.Parallel() is not used: verifyStateProofVerificationContextWrite mutates the global consensus config
+
+ verificationContext := ledgercore.StateProofVerificationContext{
+ LastAttestedRound: 120,
+ VotersCommitment: nil,
+ OnlineTotalWeight: basics.MicroAlgos{Raw: 100},
+ }
+
+ verifyStateProofVerificationContextWrite(t, []ledgercore.StateProofVerificationContext{verificationContext})
+}
+
+func TestEmptyStateProofVerificationContextWrite(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // t.Parallel() is not used: verifyStateProofVerificationContextWrite mutates the global consensus config
+
+ verifyStateProofVerificationContextWrite(t, []ledgercore.StateProofVerificationContext{})
+}
+
func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -552,40 +676,10 @@ func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess tracke
err = accessor.ResetStagingBalances(context.Background(), true)
require.NoError(t, err)
- // load the file from disk.
- fileContent, err := os.ReadFile(filepath)
- require.NoError(t, err)
- gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
- require.NoError(t, err)
- tarReader := tar.NewReader(gzipReader)
var catchupProgress CatchpointCatchupAccessorProgress
- defer gzipReader.Close()
- for {
- header, err := tarReader.Next()
- if err != nil {
- if err == io.EOF {
- break
- }
- require.NoError(t, err)
- break
- }
- balancesBlockBytes := make([]byte, header.Size)
- readComplete := int64(0)
-
- for readComplete < header.Size {
- bytesRead, err := tarReader.Read(balancesBlockBytes[readComplete:])
- readComplete += int64(bytesRead)
- if err != nil {
- if err == io.EOF {
- if readComplete == header.Size {
- break
- }
- require.NoError(t, err)
- }
- break
- }
- }
- err = accessor.ProcessStagingBalances(context.Background(), header.Name, balancesBlockBytes, &catchupProgress)
+ catchpointContent := readCatchpointFile(t, filepath)
+ for _, catchpointData := range catchpointContent {
+ err = accessor.ProcessStagingBalances(context.Background(), catchpointData.headerName, catchpointData.data, &catchupProgress)
require.NoError(t, err)
}
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index 0a6dd4b5c..ec33f2ac5 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -128,6 +128,31 @@ func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecor
hashes := make([][]byte, len(kvrs))
for i := 0; i < len(kvrs); i++ {
keys[i] = kvrs[i].Key
+
+ // Since `encoded.KVRecordV6` is `omitempty` and `omitemptyarray`,
+ // an empty box unmarshals into an `encoded.KVRecordV6` with a nil value,
+ // which could be mistaken for a box deletion.
+ //
+ // We don't want to treat it as a deleted box: during Fast Catchup (FC)
+ // we are (and should be) writing an empty byte string to the DB, rather
+ // than writing nil.
+ //
+ // This matters in sqlite3, which distinguishes writing a nil byte slice
+ // to a table from writing []byte{}:
+ // - a nil byte slice satisfies `value IS NULL`
+ // - []byte{} does not satisfy `value IS NULL`
+ //
+ // For the sake of consistency, we convert nil to []byte{}.
+ //
+ // Also, from a round-by-round catchup perspective, when a box is deleted
+ // in the accountsNewRoundImpl method, the kv pair with value = nil is
+ // removed from the kvstore table, so writing []byte{} here is the more
+ // consistent and appropriate choice.
+
+ if kvrs[i].Value == nil {
+ kvrs[i].Value = []byte{}
+ }
values[i] = kvrs[i].Value
hashes[i] = trackerdb.KvHashBuilderV6(string(keys[i]), values[i])
}
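To make the nil-versus-empty convention above concrete, here is a minimal, self-contained sketch of the normalization; the helper name is hypothetical, since writeKVs inlines this logic:

package main

import "fmt"

// normalizeBoxValue mirrors the convention writeKVs enforces: a nil box value
// decoded from a catchpoint chunk is persisted as an empty byte string, so a
// later `value IS NULL` query cannot mistake an empty box for a deleted one.
func normalizeBoxValue(v []byte) []byte {
	if v == nil {
		return []byte{}
	}
	return v
}

func main() {
	var empty []byte // what an omitempty msgpack decode yields for an empty box
	fmt.Println(normalizeBoxValue(empty) == nil) // false: stored as []byte{}, not as SQL NULL
	fmt.Println(len(normalizeBoxValue(empty)))   // 0
}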
@@ -339,10 +364,14 @@ type CatchpointCatchupAccessorProgress struct {
// ProcessStagingBalances deserializes the given bytes as temporary staging balances
func (c *catchpointCatchupAccessorImpl) ProcessStagingBalances(ctx context.Context, sectionName string, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
- if sectionName == "content.msgpack" {
+ // content.msgpack comes first, followed by stateProofVerificationContext.msgpack and then by balances.x.msgpack.
+ if sectionName == CatchpointContentFileName {
return c.processStagingContent(ctx, bytes, progress)
}
- if strings.HasPrefix(sectionName, "balances.") && strings.HasSuffix(sectionName, ".msgpack") {
+ if sectionName == catchpointSPVerificationFileName {
+ return c.processStagingStateProofVerificationContext(bytes)
+ }
+ if strings.HasPrefix(sectionName, catchpointBalancesFileNamePrefix) && strings.HasSuffix(sectionName, catchpointBalancesFileNameSuffix) {
return c.processStagingBalances(ctx, bytes, progress)
}
// we want to allow undefined sections to support backward compatibility.
@@ -350,6 +379,27 @@ func (c *catchpointCatchupAccessorImpl) ProcessStagingBalances(ctx context.Conte
return nil
}
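The dispatch above is driven purely by tar entry names, so a caller replays a catchpoint archive by feeding each section to the accessor in archive order. A minimal driver sketch under that assumption, written as if inside the ledger package; the section type and helper are hypothetical:

// section is one named entry extracted from a catchpoint tar archive.
type section struct {
	name string
	data []byte
}

// replaySections feeds sections to the accessor in archive order: the content
// header first, then the state proof verification contexts, then each
// balances chunk, matching the ordering ProcessStagingBalances expects.
func replaySections(ctx context.Context, a CatchpointCatchupAccessor, sections []section) error {
	var progress CatchpointCatchupAccessorProgress
	for _, s := range sections {
		if err := a.ProcessStagingBalances(ctx, s.name, s.data, &progress); err != nil {
			return err
		}
	}
	return nil
}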
+// processStagingStateProofVerificationContext deserializes the given bytes as temporary staging state proof verification data
+func (c *catchpointCatchupAccessorImpl) processStagingStateProofVerificationContext(bytes []byte) (err error) {
+ var decodedData catchpointStateProofVerificationContext
+ err = protocol.Decode(bytes, &decodedData)
+ if err != nil {
+ return err
+ }
+
+ if len(decodedData.Data) == 0 {
+ return
+ }
+
+ // Even six months of stuck state proofs amount to only about 1.5 MB of data,
+ // so timers and progress reports would be redundant here.
+ err = c.ledger.trackerDB().Batch(func(ctx context.Context, tx trackerdb.BatchScope) (err error) {
+ return tx.MakeSpVerificationCtxWriter().StoreSPContextsToCatchpointTbl(ctx, decodedData.Data)
+ })
+
+ return err
+}
+
// processStagingContent deserializes the given bytes as temporary staging balances content
func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Context, bytes []byte, progress *CatchpointCatchupAccessorProgress) (err error) {
if progress.SeenHeader {
@@ -363,6 +413,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex
switch fileHeader.Version {
case CatchpointFileVersionV5:
case CatchpointFileVersionV6:
+ case CatchpointFileVersionV7:
default:
return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to process catchpoint - version %d is not supported", fileHeader.Version)
}
@@ -377,7 +428,10 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex
if err != nil {
return err
}
-
+ err = cw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupVersion, fileHeader.Version)
+ if err != nil {
+ return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to write catchpoint catchup version '%s': %v", trackerdb.CatchpointStateCatchupVersion, err)
+ }
aw, err := tx.MakeAccountsWriter()
if err != nil {
return err
@@ -387,7 +441,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex
if err != nil {
return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to write catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupBlockRound, err)
}
- if fileHeader.Version == CatchpointFileVersionV6 {
+ if fileHeader.Version >= CatchpointFileVersionV6 {
err = cw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupHashRound, uint64(fileHeader.BlocksRound))
if err != nil {
return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to write catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupHashRound, err)
@@ -406,6 +460,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex
progress.Version = fileHeader.Version
c.ledger.setSynchronousMode(ctx, c.ledger.accountsRebuildSynchronousMode)
}
+
return err
}
@@ -442,6 +497,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
expectingMoreEntries = make([]bool, len(balances.Balances))
case CatchpointFileVersionV6:
+ fallthrough
+ case CatchpointFileVersionV7:
var chunk catchpointFileChunkV6
err = protocol.Decode(bytes, &chunk)
if err != nil {
@@ -872,15 +929,22 @@ func (c *catchpointCatchupAccessorImpl) GetCatchupBlockRound(ctx context.Context
// VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label.
func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, blk *bookkeeping.Block) (err error) {
var balancesHash crypto.Digest
+ var rawStateProofVerificationContext []ledgercore.StateProofVerificationContext
var blockRound basics.Round
var totals ledgercore.AccountTotals
var catchpointLabel string
+ var version uint64
catchpointLabel, err = c.catchpointStore.ReadCatchpointStateString(ctx, trackerdb.CatchpointStateCatchupLabel)
if err != nil {
return fmt.Errorf("unable to read catchpoint catchup state '%s': %v", trackerdb.CatchpointStateCatchupLabel, err)
}
+ version, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupVersion)
+ if err != nil {
+ return fmt.Errorf("unable to retrieve catchpoint version: %v", err)
+ }
+
var iRound uint64
iRound, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBlockRound)
if err != nil {
@@ -916,6 +980,12 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl
if err != nil {
return fmt.Errorf("unable to get accounts totals: %v", err)
}
+
+ rawStateProofVerificationContext, err = tx.MakeSpVerificationCtxReaderWriter().GetAllSPContextsFromCatchpointTbl(ctx)
+ if err != nil {
+ return fmt.Errorf("unable to get state proof verification data: %v", err)
+ }
+
return
})
ledgerVerifycatchpointMicros.AddMicrosecondsSince(start, nil)
@@ -926,10 +996,20 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl
return fmt.Errorf("block round in block header doesn't match block round in catchpoint: %d != %d", blockRound, blk.Round())
}
- catchpointLabelMaker := ledgercore.MakeCatchpointLabel(blockRound, blk.Digest(), balancesHash, totals)
+ wrappedContext := catchpointStateProofVerificationContext{Data: rawStateProofVerificationContext}
+ spVerificationHash := crypto.HashObj(wrappedContext)
+
+ var catchpointLabelMaker ledgercore.CatchpointLabelMaker
+ blockDigest := blk.Digest()
+ if version <= CatchpointFileVersionV6 {
+ catchpointLabelMaker = ledgercore.MakeCatchpointLabelMakerV6(blockRound, &blockDigest, &balancesHash, totals)
+ } else {
+ catchpointLabelMaker = ledgercore.MakeCatchpointLabelMakerCurrent(blockRound, &blockDigest, &balancesHash, totals, &spVerificationHash)
+ }
+ generatedLabel := ledgercore.MakeLabel(catchpointLabelMaker)
- if catchpointLabel != catchpointLabelMaker.String() {
- return fmt.Errorf("catchpoint hash mismatch; expected %s, calculated %s", catchpointLabel, catchpointLabelMaker.String())
+ if catchpointLabel != generatedLabel {
+ return fmt.Errorf("catchpoint hash mismatch; expected %s, calculated %s", catchpointLabel, generatedLabel)
}
return nil
}
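The version read back from the staging state selects the label format: V6-and-earlier labels commit to the block digest, balances hash, and totals, while V7 labels additionally commit to the hash of the state proof verification contexts. A sketch of that dispatch as a hypothetical in-package helper, using only the constructors that appear above:

// makeLabelForVersion rebuilds a catchpoint label for the given file version
// (hypothetical helper extracted from the VerifyCatchpoint logic).
func makeLabelForVersion(version uint64, rnd basics.Round, blockDigest crypto.Digest,
	balancesHash crypto.Digest, totals ledgercore.AccountTotals, spVerificationHash crypto.Digest) string {
	var maker ledgercore.CatchpointLabelMaker
	if version <= CatchpointFileVersionV6 {
		maker = ledgercore.MakeCatchpointLabelMakerV6(rnd, &blockDigest, &balancesHash, totals)
	} else {
		maker = ledgercore.MakeCatchpointLabelMakerCurrent(rnd, &blockDigest, &balancesHash, totals, &spVerificationHash)
	}
	return ledgercore.MakeLabel(maker)
}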
diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go
index 5cbf16987..a97377bd3 100644
--- a/ledger/catchupaccessor_test.go
+++ b/ledger/catchupaccessor_test.go
@@ -101,7 +101,7 @@ func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) {
accountsCount := uint64(b.N)
fileHeader := CatchpointFileHeader{
- Version: CatchpointFileVersionV6,
+ Version: CatchpointFileVersionV7,
BalancesRound: basics.Round(0),
BlocksRound: basics.Round(0),
Totals: ledgercore.AccountTotals{},
@@ -112,7 +112,7 @@ func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) {
}
encodedFileHeader := protocol.Encode(&fileHeader)
var progress CatchpointCatchupAccessorProgress
- err = catchpointAccessor.ProcessStagingBalances(context.Background(), "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(context.Background(), CatchpointContentFileName, encodedFileHeader, &progress)
require.NoError(b, err)
// pre-create all encoded chunks.
@@ -128,7 +128,8 @@ func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) {
last64KStart = time.Now()
}
- err = catchpointAccessor.ProcessStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
+ balancesFileName := fmt.Sprintf("%s%s%s", catchpointBalancesFileNamePrefix, "XX", catchpointBalancesFileNameSuffix)
+ err = catchpointAccessor.ProcessStagingBalances(context.Background(), balancesFileName, encodedAccounts, &progress)
require.NoError(b, err)
last64KIndex--
}
@@ -148,6 +149,83 @@ func BenchmarkRestoringFromCatchpointFile(b *testing.B) {
}
}
+func initializeTestCatchupAccessor(t *testing.T, l *Ledger, accountsCount uint64) (CatchpointCatchupAccessor, CatchpointCatchupAccessorProgress) {
+ log := logging.TestingLog(t)
+ catchpointAccessor := MakeCatchpointCatchupAccessor(l, log)
+
+ var progress CatchpointCatchupAccessorProgress
+
+ ctx := context.Background()
+
+ // We do this to create catchpoint staging tables.
+ err := catchpointAccessor.ResetStagingBalances(ctx, true)
+ require.NoError(t, err)
+
+ // We do this to initialize the catchpointblocks table. Needed to be able to use CompleteCatchup.
+ err = catchpointAccessor.StoreFirstBlock(ctx, &bookkeeping.Block{})
+ require.NoError(t, err)
+
+ // We do this to initialize the accounttotals table. Needed to be able to use CompleteCatchup.
+ fileHeader := CatchpointFileHeader{
+ Version: CatchpointFileVersionV7,
+ BalancesRound: basics.Round(0),
+ BlocksRound: basics.Round(0),
+ Totals: ledgercore.AccountTotals{},
+ TotalAccounts: accountsCount,
+ TotalChunks: (accountsCount + BalancesPerCatchpointFileChunk - 1) / BalancesPerCatchpointFileChunk,
+ Catchpoint: "",
+ BlockHeaderDigest: crypto.Digest{},
+ }
+ encodedFileHeader := protocol.Encode(&fileHeader)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, CatchpointContentFileName, encodedFileHeader, &progress)
+ require.NoError(t, err)
+
+ return catchpointAccessor, progress
+}
+
+func verifyStateProofVerificationCatchupAccessor(t *testing.T, targetData []ledgercore.StateProofVerificationContext) {
+ // setup boilerplate
+ log := logging.TestingLog(t)
+ dbBaseFileName := t.Name()
+ const inMem = true
+ genesisInitState, initkeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ cfg := config.GetDefaultLocal()
+ l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg)
+ require.NoError(t, err, "could not open ledger")
+ defer func() {
+ l.Close()
+ }()
+
+ catchpointAccessor, progress := initializeTestCatchupAccessor(t, l, uint64(len(initkeys)))
+
+
+ wrappedData := catchpointStateProofVerificationContext{
+ Data: targetData,
+ }
+ blob := protocol.Encode(&wrappedData)
+
+ ctx := context.Background()
+ err = catchpointAccessor.ProcessStagingBalances(ctx, catchpointSPVerificationFileName, blob, &progress)
+ require.NoError(t, err)
+
+ err = catchpointAccessor.CompleteCatchup(ctx)
+ require.NoError(t, err)
+
+ var trackedStateProofVerificationContext []ledgercore.StateProofVerificationContext
+ err = l.trackerDBs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error {
+ dbData, err := tx.MakeSpVerificationCtxReader().GetAllSPContexts(ctx)
+ trackedStateProofVerificationContext = dbData
+ return err
+ })
+
+ require.NoError(t, err)
+ require.Equal(t, len(targetData), len(trackedStateProofVerificationContext))
+ for index, data := range targetData {
+ require.Equal(t, data, trackedStateProofVerificationContext[index])
+ }
+}
+
func TestCatchupAccessorFoo(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -247,16 +325,17 @@ func TestBuildMerkleTrie(t *testing.T) {
err = catchpointAccessor.ProcessStagingBalances(ctx, "ignoredContent", blob, &progress)
require.NoError(t, err)
// this shouldn't work yet
- err = catchpointAccessor.ProcessStagingBalances(ctx, "balances.FAKE.msgpack", blob, &progress)
+ balancesFileName := fmt.Sprintf("%s%s%s", catchpointBalancesFileNamePrefix, "FAKE", catchpointBalancesFileNameSuffix)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, balancesFileName, blob, &progress)
require.Error(t, err)
// this needs content
- err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", blob, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, CatchpointContentFileName, blob, &progress)
require.Error(t, err)
// content.msgpack from this:
accountsCount := uint64(len(initKeys))
fileHeader := CatchpointFileHeader{
- Version: CatchpointFileVersionV6,
+ Version: CatchpointFileVersionV7,
BalancesRound: basics.Round(0),
BlocksRound: basics.Round(0),
Totals: ledgercore.AccountTotals{},
@@ -266,14 +345,15 @@ func TestBuildMerkleTrie(t *testing.T) {
BlockHeaderDigest: crypto.Digest{},
}
encodedFileHeader := protocol.Encode(&fileHeader)
- err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, CatchpointContentFileName, encodedFileHeader, &progress)
require.NoError(t, err)
// shouldn't work a second time
- err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, CatchpointContentFileName, encodedFileHeader, &progress)
require.Error(t, err)
// This should still fail, but slightly different coverage path
- err = catchpointAccessor.ProcessStagingBalances(ctx, "balances.FAKE.msgpack", blob, &progress)
+ balancesFileName = fmt.Sprintf("%s%s%s", catchpointBalancesFileNamePrefix, "FAKE", catchpointBalancesFileNameSuffix)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, balancesFileName, blob, &progress)
require.Error(t, err)
// create some catchpoint data
@@ -294,6 +374,24 @@ func TestBuildMerkleTrie(t *testing.T) {
require.Equal(t, basics.Round(0), blockRound)
}
+func TestCatchupAccessorStateProofVerificationContext(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ verificationContext := ledgercore.StateProofVerificationContext{
+ LastAttestedRound: 120,
+ VotersCommitment: nil,
+ OnlineTotalWeight: basics.MicroAlgos{Raw: 100},
+ }
+
+ verifyStateProofVerificationCatchupAccessor(t, []ledgercore.StateProofVerificationContext{verificationContext})
+}
+
+func TestCatchupAccessorEmptyStateProofVerificationContext(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ verifyStateProofVerificationCatchupAccessor(t, []ledgercore.StateProofVerificationContext{})
+}
+
// blockdb.go code
// TODO: blockStartCatchupStaging called from StoreFirstBlock()
// TODO: blockCompleteCatchup called from FinishBlocks()
@@ -393,7 +491,7 @@ func TestCatchupAccessorResourceCountMismatch(t *testing.T) {
// content.msgpack from this:
fileHeader := CatchpointFileHeader{
- Version: CatchpointFileVersionV6,
+ Version: CatchpointFileVersionV7,
BalancesRound: basics.Round(0),
BlocksRound: basics.Round(0),
Totals: ledgercore.AccountTotals{},
@@ -403,7 +501,7 @@ func TestCatchupAccessorResourceCountMismatch(t *testing.T) {
BlockHeaderDigest: crypto.Digest{},
}
encodedFileHeader := protocol.Encode(&fileHeader)
- err = catchpointAccessor.ProcessStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, CatchpointContentFileName, encodedFileHeader, &progress)
require.NoError(t, err)
var balances catchpointFileChunkV6
@@ -419,7 +517,8 @@ func TestCatchupAccessorResourceCountMismatch(t *testing.T) {
encodedAccounts := protocol.Encode(&balances)
// expect error since there is a resource count mismatch
- err = catchpointAccessor.ProcessStagingBalances(ctx, "balances.XX.msgpack", encodedAccounts, &progress)
+ balancesFileName := fmt.Sprintf("%s%s%s", catchpointBalancesFileNamePrefix, "XX", catchpointBalancesFileNameSuffix)
+ err = catchpointAccessor.ProcessStagingBalances(ctx, balancesFileName, encodedAccounts, &progress)
require.Error(t, err)
}
@@ -503,7 +602,7 @@ func TestCatchupAccessorProcessStagingBalances(t *testing.T) {
TotalAccounts: numAccounts,
TotalChunks: 2,
SeenHeader: true,
- Version: CatchpointFileVersionV6,
+ Version: CatchpointFileVersionV7,
}
// create some walking gentlemen
diff --git a/ledger/double_test.go b/ledger/double_test.go
index bbc5e9520..16d38e768 100644
--- a/ledger/double_test.go
+++ b/ledger/double_test.go
@@ -24,7 +24,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
"github.com/stretchr/testify/require"
@@ -42,12 +42,12 @@ import (
// then temporarily placed in `generate` mode so that the entire block can be
// generated in the copy second ledger, and compared.
type DoubleLedger struct {
- t *testing.T
+ t testing.TB
generator *Ledger
validator *Ledger
- eval *internal.BlockEvaluator
+ eval *eval.BlockEvaluator
}
func (dl DoubleLedger) Close() {
@@ -56,18 +56,18 @@ func (dl DoubleLedger) Close() {
}
// NewDoubleLedger creates a new DoubleLedger with the supplied balances and consensus version.
-func NewDoubleLedger(t *testing.T, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, cfg config.Local) DoubleLedger {
- g := newSimpleLedgerWithConsensusVersion(t, balances, cv, cfg)
- v := newSimpleLedgerFull(t, balances, cv, g.GenesisHash(), cfg)
+func NewDoubleLedger(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, cfg config.Local, opts ...simpleLedgerOption) DoubleLedger {
+ g := newSimpleLedgerWithConsensusVersion(t, balances, cv, cfg, opts...)
+ v := newSimpleLedgerFull(t, balances, cv, g.GenesisHash(), cfg, opts...)
return DoubleLedger{t, g, v, nil}
}
-func (dl *DoubleLedger) beginBlock() *internal.BlockEvaluator {
+func (dl *DoubleLedger) beginBlock() *eval.BlockEvaluator {
dl.eval = nextBlock(dl.t, dl.generator)
return dl.eval
}
-func (dl *DoubleLedger) txn(tx *txntest.Txn, problem ...string) {
+func (dl *DoubleLedger) txn(tx *txntest.Txn, problem ...string) (stib *transactions.SignedTxnInBlock) {
dl.t.Helper()
if dl.eval == nil {
dl.beginBlock()
@@ -76,25 +76,34 @@ func (dl *DoubleLedger) txn(tx *txntest.Txn, problem ...string) {
if len(problem) > 0 {
dl.eval = nil
} else {
- dl.endBlock()
+ vb := dl.endBlock()
+ // The block should contain a stib, but don't panic here if an earlier problem left the payset empty.
+ if len(vb.Block().Payset) > 0 {
+ stib = &vb.Block().Payset[0]
+ }
}
}()
}
txn(dl.t, dl.generator, dl.eval, tx, problem...)
+ return nil
}
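The `return nil` above is deliberate: `stib` is a named result, and the deferred closure runs after the return statement, so it can overwrite the value the caller receives. A standalone sketch of the pattern:

package main

import "fmt"

// namedResult shows why txn can `return nil` yet still yield a value: a
// deferred closure executes after the return statement and may rewrite a
// named result before the caller observes it.
func namedResult() (out *int) {
	defer func() {
		v := 42
		out = &v // runs after `return nil`, so the caller sees &v
	}()
	return nil
}

func main() {
	fmt.Println(*namedResult()) // prints 42
}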
-func (dl *DoubleLedger) txns(txns ...*txntest.Txn) {
+func (dl *DoubleLedger) txns(txns ...*txntest.Txn) (payset []transactions.SignedTxnInBlock) {
dl.t.Helper()
if dl.eval == nil {
dl.beginBlock()
- defer dl.endBlock()
+ defer func() {
+ vb := dl.endBlock()
+ payset = vb.Block().Payset
+ }()
}
for _, tx := range txns {
dl.txn(tx)
}
+ return nil
}
-func (dl *DoubleLedger) txgroup(problem string, txns ...*txntest.Txn) {
+func (dl *DoubleLedger) txgroup(problem string, txns ...*txntest.Txn) (payset []transactions.SignedTxnInBlock) {
dl.t.Helper()
if dl.eval == nil {
dl.beginBlock()
@@ -103,7 +112,8 @@ func (dl *DoubleLedger) txgroup(problem string, txns ...*txntest.Txn) {
if problem != "" {
dl.eval = nil
} else {
- dl.endBlock()
+ vb := dl.endBlock()
+ payset = vb.Block().Payset
}
}()
}
@@ -114,6 +124,7 @@ func (dl *DoubleLedger) txgroup(problem string, txns ...*txntest.Txn) {
require.Error(dl.t, err)
require.Contains(dl.t, err.Error(), problem)
}
+ return nil
}
func (dl *DoubleLedger) fullBlock(txs ...*txntest.Txn) *ledgercore.ValidatedBlock {
@@ -157,7 +168,7 @@ func (dl *DoubleLedger) reloadLedgers() {
require.NoError(dl.t, dl.validator.reloadLedger())
}
-func checkBlock(t *testing.T, checkLedger *Ledger, vb *ledgercore.ValidatedBlock) {
+func checkBlock(t testing.TB, checkLedger *Ledger, vb *ledgercore.ValidatedBlock) {
bl := vb.Block()
msg := bl.MarshalMsg(nil)
var reconstituted bookkeeping.Block
@@ -202,7 +213,7 @@ func checkBlock(t *testing.T, checkLedger *Ledger, vb *ledgercore.ValidatedBlock
// require.Equal(t, vb.Delta().Accts, cb.Delta().Accts)
}
-func nextCheckBlock(t testing.TB, ledger *Ledger, rs bookkeeping.RewardsState) *internal.BlockEvaluator {
+func nextCheckBlock(t testing.TB, ledger *Ledger, rs bookkeeping.RewardsState) *eval.BlockEvaluator {
rnd := ledger.Latest()
hdr, err := ledger.BlockHdr(rnd)
require.NoError(t, err)
@@ -211,7 +222,7 @@ func nextCheckBlock(t testing.TB, ledger *Ledger, rs bookkeeping.RewardsState) *
nextHdr.RewardsState = rs
// follow nextBlock, which does this for determinism
nextHdr.TimeStamp = hdr.TimeStamp + 1
- eval, err := internal.StartEvaluator(ledger, nextHdr, internal.EvaluatorOptions{
+ eval, err := eval.StartEvaluator(ledger, nextHdr, eval.EvaluatorOptions{
Generate: false,
Validate: true, // Do the complete checks that a new txn would be subject to
})
diff --git a/ledger/internal/appcow.go b/ledger/eval/appcow.go
index f843e1706..14815e8e2 100644
--- a/ledger/internal/appcow.go
+++ b/ledger/eval/appcow.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+package eval
import (
"fmt"
@@ -29,7 +29,7 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-//msgp: ignore storageAction
+//msgp:ignore storageAction
type storageAction uint64
const (
@@ -74,6 +74,7 @@ func (vd valueDelta) serialize() (vdelta basics.ValueDelta, ok bool) {
}
// stateDelta is similar to basics.StateDelta but stores both values before and after change
+//
//msgp:ignore stateDelta
type stateDelta map[string]valueDelta
@@ -458,7 +459,7 @@ func (cb *roundCowState) StatefulEval(gi int, params *logic.EvalParams, aidx bas
calf := cb.child(1)
defer func() {
// get rid of references to the object that is about to be recycled
- params.Ledger = nil
+ params.Ledger = nil
params.SigLedger = nil
calf.recycle()
}()
@@ -466,19 +467,9 @@ func (cb *roundCowState) StatefulEval(gi int, params *logic.EvalParams, aidx bas
params.Ledger = calf
params.SigLedger = calf
- // Eval the program
- pass, cx, err := logic.EvalContract(program, gi, aidx, params)
+ pass, err = logic.EvalApp(program, gi, aidx, params)
if err != nil {
- var details string
- if cx != nil {
- pc, det := cx.PcDetails()
- details = fmt.Sprintf("pc=%d, opcodes=%s", pc, det)
- }
- // Don't wrap ClearStateBudgetError, so it will be taken seriously
- if _, ok := err.(logic.ClearStateBudgetError); ok {
- return false, transactions.EvalDelta{}, err
- }
- return false, transactions.EvalDelta{}, ledgercore.LogicEvalError{Err: err, Details: details}
+ return false, transactions.EvalDelta{}, err
}
// If program passed, build our eval delta, and commit to state changes
diff --git a/ledger/internal/appcow_test.go b/ledger/eval/appcow_test.go
index d4025524c..2c46d02c0 100644
--- a/ledger/internal/appcow_test.go
+++ b/ledger/eval/appcow_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+package eval
import (
"fmt"
@@ -107,6 +107,10 @@ func (ml *emptyLedger) GetStateProofNextRound() basics.Round {
return basics.Round(0)
}
+func (ml *emptyLedger) GetStateProofVerificationContext(_ basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ return nil, fmt.Errorf("emptyLedger does not implement GetStateProofVerificationContext")
+}
+
type modsData struct {
addr basics.Address
cidx basics.CreatableIndex
diff --git a/ledger/internal/applications.go b/ledger/eval/applications.go
index f0466b7d4..e126cfd30 100644
--- a/ledger/internal/applications.go
+++ b/ledger/eval/applications.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+package eval
import (
"fmt"
@@ -229,7 +229,7 @@ func (cs *roundCowState) NewBox(appIdx basics.AppIndex, key string, value []byte
return err
}
if exists {
- return fmt.Errorf("attempt to recreate %s", key)
+ return fmt.Errorf("attempt to recreate box %#x", key)
}
record, err := cs.Get(appAddr, false)
@@ -258,10 +258,10 @@ func (cs *roundCowState) SetBox(appIdx basics.AppIndex, key string, value []byte
return err
}
if !ok {
- return fmt.Errorf("box %s does not exist for %d", key, appIdx)
+ return fmt.Errorf("box %#x does not exist for %d", key, appIdx)
}
if len(old) != len(value) {
- return fmt.Errorf("box %s is wrong size old:%d != new:%d",
+ return fmt.Errorf("box %#x is wrong size old:%d != new:%d",
key, len(old), len(value))
}
return cs.kvPut(fullKey, value)
diff --git a/ledger/internal/assetcow.go b/ledger/eval/assetcow.go
index 3813dad7c..50b710675 100644
--- a/ledger/internal/assetcow.go
+++ b/ledger/eval/assetcow.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+package eval
import (
"github.com/algorand/go-algorand/data/basics"
diff --git a/ledger/internal/cow.go b/ledger/eval/cow.go
index ade61f820..c58f65fc5 100644
--- a/ledger/internal/cow.go
+++ b/ledger/eval/cow.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+package eval
import (
"errors"
@@ -63,8 +63,8 @@ type roundCowParent interface {
getStorageLimits(addr basics.Address, aidx basics.AppIndex, global bool) (basics.StateSchema, error)
allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error)
getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error)
-
kvGet(key string) ([]byte, bool, error)
+ GetStateProofVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error)
}
// When adding new fields make sure to clear them in the roundCowState.recycle() as well to avoid dirty state
@@ -80,14 +80,14 @@ type roundCowState struct {
// storage deltas populated as side effects of AppCall transaction
// 1. Opt-in/Close actions (see Allocate/Deallocate)
- // 2. Stateful TEAL evaluation (see setKey/delKey)
+ // 2. Application evaluation (see setKey/delKey)
// must be incorporated into mods.accts before passing deltas forward
sdeltas map[basics.Address]map[storagePtr]*storageDelta
- // either or not maintain compatibility with original app refactoring behavior
+ // whether or not to maintain compatibility with original app refactoring behavior
// this is needed for generating old eval delta in new code
compatibilityMode bool
- // cache mainaining accountIdx used in getKey for local keys access
+ // cache maintaining accountIdx used in getKey for local keys access
compatibilityGetKeyCache map[basics.Address]map[storagePtr]uint64
// prevTotals contains the accounts totals for the previous round. It's being used to calculate the totals for the new round
@@ -245,6 +245,10 @@ func (cb *roundCowState) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, erro
return cb.lookupParent.BlockHdr(r)
}
+func (cb *roundCowState) GetStateProofVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ return cb.lookupParent.GetStateProofVerificationContext(stateProofLastAttestedRound)
+}
+
func (cb *roundCowState) blockHdrCached(r basics.Round) (bookkeeping.BlockHeader, error) {
return cb.lookupParent.blockHdrCached(r)
}
diff --git a/ledger/internal/cow_creatables.go b/ledger/eval/cow_creatables.go
index d43135cf8..3159d230b 100644
--- a/ledger/internal/cow_creatables.go
+++ b/ledger/eval/cow_creatables.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+package eval
import (
"fmt"
@@ -73,8 +73,8 @@ func (cs *roundCowState) GetAssetHolding(addr basics.Address, aidx basics.AssetI
return
}
if d.Holding == nil {
- // found and not deleled => must exist. Err if not
- err = fmt.Errorf("GetAppLocalState got a nil entry for (%s, %d): %p, %v", addr.String(), aidx, d.Holding, d.Deleted)
+ // found and not deleted => must exist. Err if not
+ err = fmt.Errorf("GetAssetHolding got a nil entry for (%s, %d): %p, %v", addr, aidx, d.Holding, d.Deleted)
}
ret = *d.Holding
return
diff --git a/ledger/internal/cow_test.go b/ledger/eval/cow_test.go
index 562c60f92..df33df168 100644
--- a/ledger/internal/cow_test.go
+++ b/ledger/eval/cow_test.go
@@ -14,9 +14,10 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+package eval
import (
+ "errors"
"reflect"
"testing"
@@ -25,7 +26,9 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/apply"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
@@ -34,8 +37,6 @@ import (
type mockLedger struct {
balanceMap map[basics.Address]basics.AccountData
- blocks map[basics.Round]bookkeeping.BlockHeader
- blockErr map[basics.Round]error
}
func (ml *mockLedger) lookup(addr basics.Address) (ledgercore.AccountData, error) {
@@ -99,18 +100,17 @@ func (ml *mockLedger) GetStateProofNextRound() basics.Round {
}
func (ml *mockLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) {
- err, hit := ml.blockErr[rnd]
- if hit {
- return bookkeeping.BlockHeader{}, err
- }
- hdr := ml.blocks[rnd] // default struct is fine if nothing found
- return hdr, nil
+ return bookkeeping.BlockHeader{}, errors.New("requested blockheader not found")
}
func (ml *mockLedger) blockHdrCached(rnd basics.Round) (bookkeeping.BlockHeader, error) {
return ml.BlockHdr(rnd)
}
+func (ml *mockLedger) GetStateProofVerificationContext(rnd basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ return nil, errors.New("requested state proof verification data not found")
+}
+
func checkCowByUpdate(t *testing.T, cow *roundCowState, delta ledgercore.AccountDeltas) {
for i := 0; i < delta.Len(); i++ {
addr, data := delta.GetByIdx(i)
@@ -192,6 +192,51 @@ func TestCowBalance(t *testing.T) {
checkCowByUpdate(t, c0, updates2)
}
+// TestCowDeltasAfterCommit tests that deltas are still valid after committing to parent.
+func TestCowDeltasAfterCommit(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ accts0 := ledgertesting.RandomAccounts(20, true)
+ ml := mockLedger{balanceMap: accts0}
+
+ c0 := makeRoundCowState(
+ &ml, bookkeeping.BlockHeader{}, config.Consensus[protocol.ConsensusCurrentVersion],
+ 0, ledgercore.AccountTotals{}, 0)
+ checkCow(t, c0, accts0)
+
+ c1 := c0.child(0)
+
+ acctUpdates, _, _ := ledgertesting.RandomDeltas(10, accts0, 0)
+ applyUpdates(c1, acctUpdates)
+ acctUpdates.Dehydrate() // Prep for comparison
+
+ c1.kvPut("key", []byte("value"))
+ expectedKvMods := map[string]ledgercore.KvValueDelta{
+ "key": {
+ Data: []byte("value"),
+ },
+ }
+
+ actualDeltas := c1.deltas()
+ actualDeltas.Dehydrate() // Prep for comparison
+ require.Equal(t, acctUpdates, actualDeltas.Accts)
+ require.Equal(t, expectedKvMods, actualDeltas.KvMods)
+
+ // Parent should now have deltas
+ c1.commitToParent()
+ actualDeltas = c0.deltas()
+ actualDeltas.Dehydrate() // Prep for comparison
+ require.Equal(t, acctUpdates, actualDeltas.Accts)
+ require.Equal(t, expectedKvMods, actualDeltas.KvMods)
+
+ // Deltas remain valid in child after commit
+ actualDeltas = c0.deltas()
+ actualDeltas.Dehydrate() // Prep for comparison
+ require.Equal(t, acctUpdates, actualDeltas.Accts)
+ require.Equal(t, expectedKvMods, actualDeltas.KvMods)
+}
+
func BenchmarkCowChild(b *testing.B) {
b.ReportAllocs()
cow := makeRoundCowState(nil, bookkeeping.BlockHeader{}, config.ConsensusParams{}, 10000, ledgercore.AccountTotals{}, 16)
@@ -246,3 +291,37 @@ func TestCowChildReflect(t *testing.T) {
require.Containsf(t, cowFieldNames, reflectedCowName, "new field:\"%v\" added to roundCowState, please update roundCowState.reset() to handle it before fixing the test", reflectedCowName)
}
}
+
+func TestCowStateProof(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ version := config.Consensus[protocol.ConsensusCurrentVersion]
+ firstStateproof := basics.Round(version.StateProofInterval * 2)
+ accts0 := ledgertesting.RandomAccounts(20, true)
+ ml := mockLedger{balanceMap: accts0}
+ c0 := makeRoundCowState(
+ &ml, bookkeeping.BlockHeader{}, version,
+ 0, ledgercore.AccountTotals{}, 0)
+
+ c0.SetStateProofNextRound(firstStateproof)
+ stateproofTxn := transactions.StateProofTxnFields{
+ StateProofType: protocol.StateProofBasic,
+ Message: stateproofmsg.Message{LastAttestedRound: uint64(firstStateproof) + version.StateProofInterval},
+ }
+
+ // can not apply state proof for 3*version.StateProofInterval when we expect 2*version.StateProofInterval
+ err := apply.StateProof(stateproofTxn, firstStateproof+1, c0, false)
+ a.ErrorIs(err, apply.ErrExpectedDifferentStateProofRound)
+
+ stateproofTxn.Message.LastAttestedRound = uint64(firstStateproof)
+ err = apply.StateProof(stateproofTxn, firstStateproof+1, c0, false)
+ a.NoError(err)
+ a.Equal(3*basics.Round(version.StateProofInterval), c0.GetStateProofNextRound())
+
+ // try to apply the next stateproof 3*version.StateProofInterval
+ stateproofTxn.Message.LastAttestedRound = 3 * version.StateProofInterval
+ err = apply.StateProof(stateproofTxn, firstStateproof+1, c0, false)
+ a.NoError(err)
+ a.Equal(4*basics.Round(version.StateProofInterval), c0.GetStateProofNextRound())
+}
diff --git a/ledger/internal/eval.go b/ledger/eval/eval.go
index 2d184e4b4..0607f5a38 100644
--- a/ledger/internal/eval.go
+++ b/ledger/eval/eval.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+package eval
import (
"context"
@@ -30,7 +30,7 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
"github.com/algorand/go-algorand/ledger/apply"
- "github.com/algorand/go-algorand/ledger/internal/prefetcher"
+ "github.com/algorand/go-algorand/ledger/eval/prefetcher"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
@@ -47,6 +47,7 @@ type LedgerForCowBase interface {
LookupApplication(basics.Round, basics.Address, basics.AppIndex) (ledgercore.AppResource, error)
LookupKv(basics.Round, string) ([]byte, error)
GetCreatorForRound(basics.Round, basics.CreatableIndex, basics.CreatableType) (basics.Address, bool, error)
+ GetStateProofVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error)
}
// ErrRoundZero is self-explanatory
@@ -337,6 +338,10 @@ func (x *roundCowBase) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error)
return x.l.BlockHdr(r)
}
+func (x *roundCowBase) GetStateProofVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ return x.l.GetStateProofVerificationContext(stateProofLastAttestedRound)
+}
+
func (x *roundCowBase) blockHdrCached(r basics.Round) (bookkeeping.BlockHeader, error) {
return x.l.BlockHdrCached(r)
}
@@ -615,6 +620,7 @@ type EvaluatorOptions struct {
Generate bool
MaxTxnBytesPerBlock int
ProtoParams *config.ConsensusParams
+ Tracer logic.EvalTracer
}
// StartEvaluator creates a BlockEvaluator, given a ledger and a block header
@@ -673,6 +679,7 @@ func StartEvaluator(l LedgerForEvaluator, hdr bookkeeping.BlockHeader, evalOpts
genesisHash: l.GenesisHash(),
l: l,
maxTxnBytesPerBlock: evalOpts.MaxTxnBytesPerBlock,
+ Tracer: evalOpts.Tracer,
}
// Preallocate space for the payset so that we don't have to
@@ -781,6 +788,10 @@ func StartEvaluator(l LedgerForEvaluator, hdr bookkeeping.BlockHeader, evalOpts
return nil, fmt.Errorf("overflowed subtracting rewards for block %v", hdr.Round)
}
+ if eval.Tracer != nil {
+ eval.Tracer.BeforeBlock(&eval.block.BlockHeader)
+ }
+
return eval, nil
}
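With the tracer threaded through EvaluatorOptions, callers can observe the block lifecycle hooks fired here and in endOfBlock. A minimal in-package sketch, assuming a no-op embedding such as logic.NullEvalTracer is available to satisfy the rest of the EvalTracer interface:

// blockLogger counts the block-level hooks the evaluator now fires.
type blockLogger struct {
	logic.NullEvalTracer // assumed no-op base for the remaining EvalTracer methods
	began, ended int
}

func (t *blockLogger) BeforeBlock(hdr *bookkeeping.BlockHeader) { t.began++ }
func (t *blockLogger) AfterBlock(hdr *bookkeeping.BlockHeader)  { t.ended++ }

A test would pass it as the new tracer argument, for example l.StartEvaluator(newBlock.BlockHeader, 0, 0, &blockLogger{}), mirroring the mocktracer usage in the updated tests.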
@@ -954,7 +965,8 @@ func (eval *BlockEvaluator) TransactionGroup(txgroup []transactions.SignedTxnWit
eval.Tracer.BeforeTxnGroup(evalParams)
// Ensure we update the tracer before exiting
defer func() {
- eval.Tracer.AfterTxnGroup(evalParams, err)
+ deltas := cow.deltas()
+ eval.Tracer.AfterTxnGroup(evalParams, &deltas, err)
}()
}
@@ -1188,14 +1200,9 @@ func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, cow *r
err = apply.ApplicationCall(tx.ApplicationCallTxnFields, tx.Header, cow, &ad, gi, evalParams, ctr)
case protocol.StateProofTx:
- // in case of a StateProofTx transaction, we want to "apply" it only in validate or generate mode. This will deviate the cow's StateProofNextRound depending on
- // whether we're in validate/generate mode or not, however - given that this variable is only being used in these modes, it would be safe.
- // The reason for making this into an exception is that during initialization time, the accounts update is "converting" the recent 320 blocks into deltas to
- // be stored in memory. These deltas don't care about the state proofs, and so we can improve the node load time. Additionally, it save us from
- // performing the validation during catchup, which is another performance boost.
- if eval.validate || eval.generate {
- err = apply.StateProof(tx.StateProofTxnFields, tx.Header.FirstValid, cow, eval.validate)
- }
+ // Applying the StateProof transaction advances the cow's StateProofNextRound field.
+ // The transaction is validated before being applied only when running in validate mode.
+ err = apply.StateProof(tx.StateProofTxnFields, tx.Header.FirstValid, cow, eval.validate)
default:
err = fmt.Errorf("unknown transaction type %v", tx.Type)
@@ -1316,8 +1323,30 @@ func (eval *BlockEvaluator) endOfBlock() error {
if !eval.block.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment.IsEqual(expectedVoters) {
return fmt.Errorf("StateProofVotersCommitment wrong: %v != %v", eval.block.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment, expectedVoters)
}
- if eval.block.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != expectedVotersWeight {
- return fmt.Errorf("StateProofOnlineTotalWeight wrong: %v != %v", eval.block.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight, expectedVotersWeight)
+ if eval.proto.ExcludeExpiredCirculation {
+ if eval.block.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != expectedVotersWeight {
+ return fmt.Errorf("StateProofOnlineTotalWeight wrong: %v != %v", eval.block.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight, expectedVotersWeight)
+ }
+ } else {
+ if eval.block.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != expectedVotersWeight {
+ actualVotersWeight := eval.block.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight
+ var highWeight, lowWeight basics.MicroAlgos
+ if expectedVotersWeight.LessThan(actualVotersWeight) {
+ highWeight = actualVotersWeight
+ lowWeight = expectedVotersWeight
+ } else {
+ highWeight = expectedVotersWeight
+ lowWeight = actualVotersWeight
+ }
+ const stakeDiffusionFactor = 5
+ allowedDelta, overflowed := basics.Muldiv(expectedVotersWeight.Raw, stakeDiffusionFactor, 100)
+ if overflowed {
+ return fmt.Errorf("StateProofOnlineTotalWeight overflow: %v != %v", actualVotersWeight, expectedVotersWeight)
+ }
+ if (highWeight.Raw - lowWeight.Raw) > allowedDelta {
+ return fmt.Errorf("StateProofOnlineTotalWeight wrong: %v != %v greater than %d", actualVotersWeight, expectedVotersWeight, allowedDelta)
+ }
+ }
}
if eval.block.StateProofTracking[protocol.StateProofBasic].StateProofNextRound != eval.state.GetStateProofNextRound() {
return fmt.Errorf("StateProofNextRound wrong: %v != %v", eval.block.StateProofTracking[protocol.StateProofBasic].StateProofNextRound, eval.state.GetStateProofNextRound())
@@ -1334,6 +1363,10 @@ func (eval *BlockEvaluator) endOfBlock() error {
return err
}
+ if eval.Tracer != nil {
+ eval.Tracer.AfterBlock(&eval.block.BlockHeader)
+ }
+
return nil
}
@@ -1544,7 +1577,7 @@ func (validator *evalTxValidator) run() {
// Validate: Eval(ctx, l, blk, true, txcache, executionPool)
// AddBlock: Eval(context.Background(), l, blk, false, txcache, nil)
// tracker: Eval(context.Background(), l, blk, false, txcache, nil)
-func Eval(ctx context.Context, l LedgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool) (ledgercore.StateDelta, error) {
+func Eval(ctx context.Context, l LedgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool, tracer logic.EvalTracer) (ledgercore.StateDelta, error) {
// flush the pending writes in the cache to make everything read so far available during eval
l.FlushCaches()
@@ -1553,6 +1586,7 @@ func Eval(ctx context.Context, l LedgerForEvaluator, blk bookkeeping.Block, vali
PaysetHint: len(blk.Payset),
Validate: validate,
Generate: false,
+ Tracer: tracer,
})
if err != nil {
return ledgercore.StateDelta{}, err
diff --git a/ledger/internal/eval_test.go b/ledger/eval/eval_test.go
index 0d4e6aaa3..08a8b8c82 100644
--- a/ledger/internal/eval_test.go
+++ b/ledger/eval/eval_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+package eval
import (
"bytes"
@@ -31,17 +31,14 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
- "github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
basics_testing "github.com/algorand/go-algorand/data/basics/testing"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/logic/mocktracer"
"github.com/algorand/go-algorand/data/transactions/verify"
"github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger/apply"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/protocol"
@@ -74,7 +71,7 @@ func TestBlockEvaluatorFeeSink(t *testing.T) {
genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
require.NoError(t, err)
newBlock := bookkeeping.MakeBlock(genesisBlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil)
require.NoError(t, err)
require.Equal(t, eval.specials.FeeSink, testSinkAddr)
}
@@ -93,7 +90,7 @@ func testEvalAppGroup(t *testing.T, schema basics.StateSchema) (*BlockEvaluator,
blkHeader, err := l.BlockHdr(basics.Round(0))
require.NoError(t, err)
newBlock := bookkeeping.MakeBlock(blkHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil)
require.NoError(t, err)
eval.validate = true
eval.generate = false
@@ -139,7 +136,7 @@ ok:
Type: protocol.ApplicationCallTx,
Header: header,
ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
- ApplicationID: 1,
+ ApplicationID: 1001,
},
}
@@ -157,7 +154,7 @@ ok:
EvalDelta: transactions.EvalDelta{GlobalDelta: map[string]basics.ValueDelta{
"creator": {Action: basics.SetBytesAction, Bytes: string(addrs[0][:])}},
},
- ApplicationID: 1,
+ ApplicationID: 1001,
},
},
{
@@ -198,86 +195,11 @@ func TestEvalAppAllocStateWithTxnGroup(t *testing.T) {
require.NoError(t, err)
deltas := eval.state.deltas()
ad, _ := deltas.Accts.GetBasicsAccountData(addr)
- state := ad.AppParams[1].GlobalState
+ state := ad.AppParams[1001].GlobalState
require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["caller"])
require.Equal(t, basics.TealValue{Type: basics.TealBytesType, Bytes: string(addr[:])}, state["creator"])
}
-func TestCowStateProof(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var spType protocol.StateProofType
- var stateProof stateproof.StateProof
- var atRound basics.Round
- var validate bool
- msg := stateproofmsg.Message{}
-
- accts0 := ledgertesting.RandomAccounts(20, true)
- blocks := make(map[basics.Round]bookkeeping.BlockHeader)
- blockErr := make(map[basics.Round]error)
- ml := mockLedger{balanceMap: accts0, blocks: blocks, blockErr: blockErr}
- c0 := makeRoundCowState(
- &ml, bookkeeping.BlockHeader{}, config.Consensus[protocol.ConsensusCurrentVersion],
- 0, ledgercore.AccountTotals{}, 0)
-
- spType = protocol.StateProofType(1234) // bad stateproof type
- stateProofTx := transactions.StateProofTxnFields{
- StateProofType: spType,
- StateProof: stateProof,
- Message: msg,
- }
- err := apply.StateProof(stateProofTx, atRound, c0, validate)
- require.ErrorIs(t, err, apply.ErrStateProofTypeNotSupported)
-
- // no spRnd block
- stateProofTx.StateProofType = protocol.StateProofBasic
- noBlockErr := errors.New("no block")
- blockErr[3] = noBlockErr
- stateProofTx.Message.LastAttestedRound = 3
- err = apply.StateProof(stateProofTx, atRound, c0, validate)
- require.Contains(t, err.Error(), "no block")
-
- // stateproof txn doesn't confirm the next state proof round. expected is in the past
- validate = true
- stateProofTx.Message.LastAttestedRound = uint64(16)
- c0.SetStateProofNextRound(8)
- err = apply.StateProof(stateProofTx, atRound, c0, validate)
- require.ErrorIs(t, err, apply.ErrExpectedDifferentStateProofRound)
-
- // stateproof txn doesn't confirm the next state proof round. expected is in the future
- validate = true
- stateProofTx.Message.LastAttestedRound = uint64(16)
- c0.SetStateProofNextRound(32)
- err = apply.StateProof(stateProofTx, atRound, c0, validate)
- require.ErrorIs(t, err, apply.ErrExpectedDifferentStateProofRound)
-
- // no votersRnd block
- // this is slightly a mess of things that don't quite line up with likely usage
- validate = true
- var spHdr bookkeeping.BlockHeader
- spHdr.CurrentProtocol = "TestCowStateProof"
- spHdr.Round = 1
- proto := config.Consensus[spHdr.CurrentProtocol]
- proto.StateProofInterval = 2
- config.Consensus[spHdr.CurrentProtocol] = proto
- blocks[spHdr.Round] = spHdr
-
- spHdr.Round = 15
- blocks[spHdr.Round] = spHdr
- stateProofTx.Message.LastAttestedRound = uint64(spHdr.Round)
- c0.SetStateProofNextRound(15)
- blockErr[13] = noBlockErr
- err = apply.StateProof(stateProofTx, atRound, c0, validate)
- require.Contains(t, err.Error(), "no block")
-
- // fall through to no err
- validate = false
- err = apply.StateProof(stateProofTx, atRound, c0, validate)
- require.NoError(t, err)
-
- // 100% coverage
-}
-
// a couple trivial tests that don't need setup
// see TestBlockEvaluator for more
func TestTestTransactionGroup(t *testing.T) {
@@ -353,13 +275,14 @@ func TestTransactionGroupWithTracer(t *testing.T) {
t.Parallel()
genesisInitState, addrs, keys := ledgertesting.Genesis(10)
- innerAppID := basics.AppIndex(3)
+ basicAppID := basics.AppIndex(1001)
+ innerAppID := basics.AppIndex(1003)
innerAppAddress := innerAppID.Address()
balances := genesisInitState.Accounts
balances[innerAppAddress] = basics_testing.MakeAccountData(basics.Offline, basics.MicroAlgos{Raw: 1_000_000})
genesisBalances := bookkeeping.GenesisBalances{
- Balances: genesisInitState.Accounts,
+ Balances: balances,
FeeSink: testSinkAddr,
RewardsPool: testPoolAddr,
Timestamp: 0,
@@ -369,7 +292,8 @@ func TestTransactionGroupWithTracer(t *testing.T) {
blkHeader, err := l.BlockHdr(basics.Round(0))
require.NoError(t, err)
newBlock := bookkeeping.MakeBlock(blkHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ tracer := &mocktracer.Tracer{}
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, tracer)
require.NoError(t, err)
eval.validate = true
eval.generate = true
@@ -420,7 +344,7 @@ int 1`,
// an app call with inner txn
innerAppCallTxn := txntest.Txn{
Type: protocol.ApplicationCallTx,
- Sender: addrs[0],
+ Sender: addrs[4],
ClearStateProgram: `#pragma version 6
int 1`,
@@ -429,25 +353,54 @@ int 1`,
Fee: minFee,
GenesisHash: genHash,
}
+
+ expectedFeeSinkDataForScenario := ledgercore.ToAccountData(balances[testSinkAddr])
+ expectedFeeSinkDataForScenario.MicroAlgos.Raw += basicAppCallTxn.Txn().Fee.Raw
+ if testCase.firstTxnBehavior == "approve" {
+ expectedFeeSinkDataForScenario.MicroAlgos.Raw += payTxn.Txn().Fee.Raw
+ }
+
scenario := testCase.innerAppCallScenario(mocktracer.TestScenarioInfo{
- CallingTxn: innerAppCallTxn.Txn(),
- MinFee: minFee,
- CreatedAppID: innerAppID,
+ CallingTxn: innerAppCallTxn.Txn(),
+ SenderData: ledgercore.ToAccountData(balances[addrs[4]]),
+ AppAccountData: ledgercore.ToAccountData(balances[innerAppAddress]),
+ FeeSinkData: expectedFeeSinkDataForScenario,
+ FeeSinkAddr: testSinkAddr,
+ MinFee: minFee,
+ CreatedAppID: innerAppID,
+ BlockHeader: eval.block.BlockHeader,
+ PrevTimestamp: blkHeader.TimeStamp,
})
innerAppCallTxn.ApprovalProgram = scenario.Program
txntest.Group(&basicAppCallTxn, &payTxn, &innerAppCallTxn)
+ // Update the expected state delta to reflect the inner app call txid
+ scenarioTxidValue, ok := scenario.ExpectedStateDelta.Txids[transactions.Txid{}]
+ if ok {
+ delete(scenario.ExpectedStateDelta.Txids, transactions.Txid{})
+ scenario.ExpectedStateDelta.Txids[innerAppCallTxn.Txn().ID()] = scenarioTxidValue
+ }
+ for i := range scenario.ExpectedEvents {
+ deltas := scenario.ExpectedEvents[i].Deltas
+ if deltas == nil {
+ continue
+ }
+ txidValue, ok := deltas.Txids[transactions.Txid{}]
+ if ok {
+ delete(deltas.Txids, transactions.Txid{})
+ deltas.Txids[innerAppCallTxn.Txn().ID()] = txidValue
+ }
+ }
+
txgroup := transactions.WrapSignedTxnsWithAD([]transactions.SignedTxn{
basicAppCallTxn.Txn().Sign(keys[0]),
payTxn.Txn().Sign(keys[1]),
- innerAppCallTxn.Txn().Sign(keys[0]),
+ innerAppCallTxn.Txn().Sign(keys[4]),
})
require.Len(t, eval.block.Payset, 0)
- tracer := &mocktracer.Tracer{}
- eval.Tracer = tracer
err = eval.TransactionGroup(txgroup)
switch testCase.firstTxnBehavior {
case "approve":
@@ -467,7 +420,7 @@ int 1`,
}
expectedBasicAppCallAD := transactions.ApplyData{
- ApplicationID: 1,
+ ApplicationID: basicAppID,
EvalDelta: transactions.EvalDelta{
GlobalDelta: basics.StateDelta{},
LocalDeltas: map[uint64]basics.StateDelta{},
@@ -481,9 +434,108 @@ int 1`,
},
}
- var expectedEvents []mocktracer.Event
+ expectedFeeSinkData := ledgercore.ToAccountData(balances[testSinkAddr])
+ expectedFeeSinkData.MicroAlgos.Raw += txgroup[0].Txn.Fee.Raw
+ expectedAcct0Data := ledgercore.ToAccountData(balances[addrs[0]])
+ expectedAcct0Data.MicroAlgos.Raw -= txgroup[0].Txn.Fee.Raw
+ expectedAcct0Data.TotalAppParams = 1
+
+ expectedBlockHeader := eval.block.BlockHeader
+ expectedBasicAppCallDelta := ledgercore.StateDelta{
+ Accts: ledgercore.AccountDeltas{
+ Accts: []ledgercore.BalanceRecord{
+ {
+ Addr: addrs[0],
+ AccountData: expectedAcct0Data,
+ },
+ {
+ Addr: testSinkAddr,
+ AccountData: expectedFeeSinkData,
+ },
+ },
+ AppResources: []ledgercore.AppResourceRecord{
+ {
+ Aidx: basicAppID,
+ Addr: addrs[0],
+ Params: ledgercore.AppParamsDelta{
+ Params: &basics.AppParams{
+ ApprovalProgram: txgroup[0].Txn.ApprovalProgram,
+ ClearStateProgram: txgroup[0].Txn.ClearStateProgram,
+ },
+ },
+ },
+ },
+ },
+ Creatables: map[basics.CreatableIndex]ledgercore.ModifiedCreatable{
+ basics.CreatableIndex(basicAppID): {
+ Ctype: basics.AppCreatable,
+ Created: true,
+ Creator: addrs[0],
+ },
+ },
+ Txids: map[transactions.Txid]ledgercore.IncludedTransactions{
+ txgroup[0].Txn.ID(): {
+ LastValid: txgroup[0].Txn.LastValid,
+ Intra: 0,
+ },
+ },
+ Hdr: &expectedBlockHeader,
+ PrevTimestamp: blkHeader.TimeStamp,
+ }
+ expectedBasicAppCallDelta.Hydrate()
+
+ expectedEvents := []mocktracer.Event{mocktracer.BeforeBlock(eval.block.Round())}
if testCase.firstTxnBehavior == "approve" {
- expectedEvents = mocktracer.FlattenEvents([][]mocktracer.Event{
+ err = eval.endOfBlock()
+ require.NoError(t, err)
+
+ expectedAcct1Data := ledgercore.AccountData{}
+ expectedAcct2Data := ledgercore.ToAccountData(balances[addrs[2]])
+ expectedAcct2Data.MicroAlgos.Raw += payTxn.Amount
+ expectedAcct3Data := ledgercore.ToAccountData(balances[addrs[3]])
+ expectedAcct3Data.MicroAlgos.Raw += expectedPayTxnAD.ClosingAmount.Raw
+ expectedFeeSinkData.MicroAlgos.Raw += txgroup[1].Txn.Fee.Raw
+
+ expectedPayTxnDelta := ledgercore.StateDelta{
+ Accts: ledgercore.AccountDeltas{
+ Accts: []ledgercore.BalanceRecord{
+ {
+ Addr: addrs[1],
+ AccountData: expectedAcct1Data,
+ },
+ {
+ Addr: testSinkAddr,
+ AccountData: expectedFeeSinkData,
+ },
+ {
+ Addr: addrs[2],
+ AccountData: expectedAcct2Data,
+ },
+ {
+ Addr: addrs[3],
+ AccountData: expectedAcct3Data,
+ },
+ },
+ },
+ Txids: map[transactions.Txid]ledgercore.IncludedTransactions{
+ txgroup[1].Txn.ID(): {
+ LastValid: txgroup[1].Txn.LastValid,
+ Intra: 0, // will be incremented once merged
+ },
+ },
+ Hdr: &expectedBlockHeader,
+ PrevTimestamp: blkHeader.TimeStamp,
+ }
+ expectedPayTxnDelta.Hydrate()
+
+ expectedDelta := mocktracer.MergeStateDeltas(expectedBasicAppCallDelta, expectedPayTxnDelta, scenario.ExpectedStateDelta)
+
+ // If the scenario failed, we expect the failed txn ID to be removed from the group state delta
+ if scenario.Outcome != mocktracer.ApprovalOutcome {
+ delete(expectedDelta.Txids, txgroup[2].ID())
+ }
+
+ expectedEvents = append(expectedEvents, mocktracer.FlattenEvents([][]mocktracer.Event{
{
mocktracer.BeforeTxnGroup(3),
mocktracer.BeforeTxn(protocol.ApplicationCallTx), // start basicAppCallTxn
@@ -498,14 +550,18 @@ int 1`,
},
scenario.ExpectedEvents,
{
- mocktracer.AfterTxnGroup(3, scenario.Outcome != mocktracer.ApprovalOutcome),
+ mocktracer.AfterTxnGroup(3, &expectedDelta, scenario.Outcome != mocktracer.ApprovalOutcome),
+ mocktracer.AfterBlock(eval.block.Round()),
},
- })
+ })...)
} else {
+ // Remove the failed txid from the expected state delta
+ delete(expectedBasicAppCallDelta.Txids, txgroup[0].Txn.ID())
+
hasError := testCase.firstTxnBehavior == "error"
// EvalDeltas are removed from failed app call transactions
expectedBasicAppCallAD.EvalDelta = transactions.EvalDelta{}
- expectedEvents = mocktracer.FlattenEvents([][]mocktracer.Event{
+ expectedEvents = append(expectedEvents, mocktracer.FlattenEvents([][]mocktracer.Event{
{
mocktracer.BeforeTxnGroup(3),
mocktracer.BeforeTxn(protocol.ApplicationCallTx), // start basicAppCallTxn
@@ -515,11 +571,12 @@ int 1`,
{
mocktracer.AfterProgram(logic.ModeApp, hasError),
mocktracer.AfterTxn(protocol.ApplicationCallTx, expectedBasicAppCallAD, true), // end basicAppCallTxn
- mocktracer.AfterTxnGroup(3, true),
+ mocktracer.AfterTxnGroup(3, &expectedBasicAppCallDelta, true),
},
- })
+ })...)
}
- require.Equal(t, expectedEvents, mocktracer.StripInnerTxnGroupIDsFromEvents(tracer.Events))
+ actualEvents := mocktracer.StripInnerTxnGroupIDsFromEvents(tracer.Events)
+ mocktracer.AssertEventsEqual(t, expectedEvents, actualEvents)
})
}
}
@@ -579,7 +636,7 @@ func testnetFixupExecution(t *testing.T, headerRound basics.Round, poolBonus uin
l.genesisHash = genesisInitState.GenesisHash
newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil)
require.NoError(t, err)
// won't work before funding bank
@@ -673,6 +730,8 @@ type evalTestLedger struct {
feeSink basics.Address
rewardsPool basics.Address
latestTotals ledgercore.AccountTotals
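+ // tracer, when set, is passed through to Eval and Validate calls below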
+ tracer logic.EvalTracer
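+ // boxes backs LookupKv with a single, round-agnostic view of box values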
+ boxes map[string][]byte
}
// newTestLedger creates an in-memory Ledger that is as realistic as
@@ -683,10 +742,15 @@ func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *evalTest
roundBalances: make(map[basics.Round]map[basics.Address]basics.AccountData),
feeSink: balances.FeeSink,
rewardsPool: balances.RewardsPool,
+ tracer: nil,
+ boxes: make(map[string][]byte),
}
+ protoVersion := protocol.ConsensusFuture
+ proto := config.Consensus[protoVersion]
+
crypto.RandBytes(l.genesisHash[:])
- genBlock, err := bookkeeping.MakeGenesisBlock(protocol.ConsensusFuture,
+ genBlock, err := bookkeeping.MakeGenesisBlock(protoVersion,
balances, "test", l.genesisHash)
require.NoError(t, err)
l.roundBalances[0] = balances.Balances
@@ -694,12 +758,11 @@ func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *evalTest
// calculate the accounts totals.
var ot basics.OverflowTracker
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
for _, acctData := range balances.Balances {
l.latestTotals.AddAccount(proto, ledgercore.ToAccountData(acctData), &ot)
}
l.genesisProto = proto
- l.genesisProtoVersion = protocol.ConsensusCurrentVersion
+ l.genesisProtoVersion = protoVersion
require.False(t, genBlock.FeeSink.IsZero())
require.False(t, genBlock.RewardsPool.IsZero())
@@ -713,7 +776,7 @@ func newTestLedger(t testing.TB, balances bookkeeping.GenesisBalances) *evalTest
func (ledger *evalTestLedger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error) {
verifiedTxnCache := verify.MakeVerifiedTransactionCache(config.GetDefaultLocal().VerifiedTranscationsCacheSize)
- delta, err := Eval(ctx, ledger, blk, true, verifiedTxnCache, executionPool)
+ delta, err := Eval(ctx, ledger, blk, true, verifiedTxnCache, executionPool, ledger.tracer)
if err != nil {
return nil, err
}
@@ -726,13 +789,14 @@ func (ledger *evalTestLedger) Validate(ctx context.Context, blk bookkeeping.Bloc
// of the block that the caller is planning to evaluate. If the length of the
// payset being evaluated is known in advance, a paysetHint >= 0 can be
// passed, avoiding unnecessary payset slice growth.
-func (ledger *evalTestLedger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnBytesPerBlock int) (*BlockEvaluator, error) {
+func (ledger *evalTestLedger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnBytesPerBlock int, tracer logic.EvalTracer) (*BlockEvaluator, error) {
return StartEvaluator(ledger, hdr,
EvaluatorOptions{
PaysetHint: paysetHint,
Validate: true,
Generate: true,
MaxTxnBytesPerBlock: maxTxnBytesPerBlock,
+ Tracer: tracer,
})
}
@@ -770,7 +834,7 @@ func (ledger *evalTestLedger) LookupApplication(rnd basics.Round, addr basics.Ad
res := ledgercore.AppResource{}
ad, ok := ledger.roundBalances[rnd][addr]
if !ok {
- return res, fmt.Errorf("no such account %s", addr.String())
+ return res, fmt.Errorf("no such account %s while looking up app", addr.String())
}
if params, ok := ad.AppParams[aidx]; ok {
res.AppParams = &params
@@ -785,7 +849,7 @@ func (ledger *evalTestLedger) LookupAsset(rnd basics.Round, addr basics.Address,
res := ledgercore.AssetResource{}
ad, ok := ledger.roundBalances[rnd][addr]
if !ok {
- return res, fmt.Errorf("no such account %s", addr.String())
+ return res, fmt.Errorf("no such account %s while looking up asset", addr.String())
}
if params, ok := ad.AssetParams[aidx]; ok {
res.AssetParams = &params
@@ -797,7 +861,9 @@ func (ledger *evalTestLedger) LookupAsset(rnd basics.Round, addr basics.Address,
}
func (ledger *evalTestLedger) LookupKv(rnd basics.Round, key string) ([]byte, error) {
- panic("unimplemented")
+ // The test ledger keeps a single view of each box value--no round-based retrieval is implemented currently.
+ return ledger.boxes[key], nil
}
// GenesisHash returns the genesis hash for this ledger.
@@ -820,6 +886,10 @@ func (ledger *evalTestLedger) Latest() basics.Round {
return basics.Round(len(ledger.blocks)).SubSaturate(1)
}
+func (ledger *evalTestLedger) GetStateProofVerificationContext(_ basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ return nil, errors.New("evalTestLedger does not implement GetStateProofVerificationContext")
+}
+
// AddValidatedBlock adds a new block to the ledger, after the block has
// been validated by calling Ledger.Validate(). This saves the cost of
// having to re-compute the effect of the block on the ledger state, if
@@ -908,7 +978,7 @@ func (ledger *evalTestLedger) nextBlock(t testing.TB) *BlockEvaluator {
require.NoError(t, err)
nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
- eval, err := ledger.StartEvaluator(nextHdr, 0, 0)
+ eval, err := ledger.StartEvaluator(nextHdr, 0, 0, nil)
require.NoError(t, err)
return eval
}
@@ -995,6 +1065,10 @@ func (l *testCowBaseLedger) LookupKv(rnd basics.Round, key string) ([]byte, erro
return nil, errors.New("not implemented")
}
+func (l *testCowBaseLedger) GetStateProofVerificationContext(_ basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ return nil, errors.New("testCowBaseLedger does not implement GetStateProofVerificationContext")
+}
+
func (l *testCowBaseLedger) GetCreatorForRound(_ basics.Round, cindex basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
res := l.creators[0]
l.creators = l.creators[1:]
@@ -1087,7 +1161,7 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) {
newBlock := bookkeeping.MakeBlock(l.blocks[0].BlockHeader)
- blkEval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ blkEval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil)
require.NoError(t, err)
// Advance the evaluator a couple rounds...
@@ -1124,7 +1198,7 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) {
validatedBlock, err := blkEval.GenerateBlock()
require.NoError(t, err)
- _, err = Eval(context.Background(), l, validatedBlock.Block(), false, nil, nil)
+ _, err = Eval(context.Background(), l, validatedBlock.Block(), false, nil, nil, l.tracer)
require.NoError(t, err)
acctData, _ := blkEval.state.lookup(recvAddr)
@@ -1135,7 +1209,7 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) {
badBlock := *validatedBlock
// First validate that bad block is fine if we dont touch it...
- _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil, l.tracer)
require.NoError(t, err)
badBlock = *validatedBlock
@@ -1145,7 +1219,7 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) {
badBlockObj.ExpiredParticipationAccounts = append(badBlockObj.ExpiredParticipationAccounts, basics.Address{1})
badBlock = ledgercore.MakeValidatedBlock(badBlockObj, badBlock.Delta())
- _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil, l.tracer)
require.Error(t, err)
badBlock = *validatedBlock
@@ -1159,7 +1233,7 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) {
}
badBlock = ledgercore.MakeValidatedBlock(badBlockObj, badBlock.Delta())
- _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil, l.tracer)
require.Error(t, err)
badBlock = *validatedBlock
@@ -1169,12 +1243,12 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) {
badBlockObj.ExpiredParticipationAccounts = append(badBlockObj.ExpiredParticipationAccounts, badBlockObj.ExpiredParticipationAccounts[0])
badBlock = ledgercore.MakeValidatedBlock(badBlockObj, badBlock.Delta())
- _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil, l.tracer)
require.Error(t, err)
badBlock = *validatedBlock
// sanity check that bad block is being actually copied and not just the pointer
- _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil)
+ _, err = Eval(context.Background(), l, badBlock.Block(), true, verify.GetMockedCache(true), nil, l.tracer)
require.NoError(t, err)
}
@@ -1228,7 +1302,7 @@ func TestExpiredAccountGenerationWithDiskFailure(t *testing.T) {
newBlock := bookkeeping.MakeBlock(l.blocks[0].BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil)
require.NoError(t, err)
// Advance the evaluator a couple rounds...
@@ -1326,7 +1400,7 @@ func TestExpiredAccountGeneration(t *testing.T) {
newBlock := bookkeeping.MakeBlock(l.blocks[0].BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil)
require.NoError(t, err)
// Advance the evaluator a couple rounds...
diff --git a/ledger/internal/evalindexer.go b/ledger/eval/evalindexer.go
index babf6c87d..af3a5546c 100644
--- a/ledger/internal/evalindexer.go
+++ b/ledger/eval/evalindexer.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package internal
+package eval
import (
"fmt"
diff --git a/ledger/internal/prefetcher/error.go b/ledger/eval/prefetcher/error.go
index 77c1cb999..77c1cb999 100644
--- a/ledger/internal/prefetcher/error.go
+++ b/ledger/eval/prefetcher/error.go
diff --git a/ledger/internal/prefetcher/prefetcher.go b/ledger/eval/prefetcher/prefetcher.go
index e00d78f70..e00d78f70 100644
--- a/ledger/internal/prefetcher/prefetcher.go
+++ b/ledger/eval/prefetcher/prefetcher.go
diff --git a/ledger/internal/prefetcher/prefetcher_alignment_test.go b/ledger/eval/prefetcher/prefetcher_alignment_test.go
index 1d5291cf1..efb9e683b 100644
--- a/ledger/internal/prefetcher/prefetcher_alignment_test.go
+++ b/ledger/eval/prefetcher/prefetcher_alignment_test.go
@@ -26,11 +26,13 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
+ "github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger/internal"
- "github.com/algorand/go-algorand/ledger/internal/prefetcher"
+ "github.com/algorand/go-algorand/ledger/eval"
+ "github.com/algorand/go-algorand/ledger/eval/prefetcher"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -102,6 +104,11 @@ func (l *prefetcherAlignmentTestLedger) BlockHdrCached(round basics.Round) (book
func (l *prefetcherAlignmentTestLedger) CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error {
return nil
}
+
+func (l *prefetcherAlignmentTestLedger) GetStateProofVerificationContext(_ basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ return nil, fmt.Errorf("prefetcherAlignmentTestLedger does not implement GetStateProofVerificationContext")
+}
+
func (l *prefetcherAlignmentTestLedger) LookupWithoutRewards(_ basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) {
l.mu.Lock()
if l.requestedBalances == nil {
@@ -290,7 +297,7 @@ func runEval(t *testing.T, l *prefetcherAlignmentTestLedger, txn transactions.Tr
require.NoError(t, err)
block := bookkeeping.MakeBlock(genesisBlockHeader)
- eval, err := internal.StartEvaluator(l, block.BlockHeader, internal.EvaluatorOptions{})
+ eval, err := eval.StartEvaluator(l, block.BlockHeader, eval.EvaluatorOptions{})
require.NoError(t, err)
err = eval.TransactionGroup(makeGroupFromTxn(txn))
@@ -1367,7 +1374,17 @@ func TestEvaluatorPrefetcherAlignmentStateProof(t *testing.T) {
Sender: addr,
GenesisHash: genesisHash(),
},
- StateProofTxnFields: transactions.StateProofTxnFields{},
+ StateProofTxnFields: transactions.StateProofTxnFields{
+ StateProofType: 0,
+ StateProof: stateproof.StateProof{},
+ Message: stateproofmsg.Message{
+ BlockHeadersCommitment: nil,
+ VotersCommitment: nil,
+ LnProvenWeight: 0,
+ FirstAttestedRound: 257,
+ LastAttestedRound: 512,
+ },
+ },
}
requested, prefetched := run(t, l, txn)
diff --git a/ledger/internal/prefetcher/prefetcher_test.go b/ledger/eval/prefetcher/prefetcher_test.go
index b9c1d80eb..2eff5f344 100644
--- a/ledger/internal/prefetcher/prefetcher_test.go
+++ b/ledger/eval/prefetcher/prefetcher_test.go
@@ -28,7 +28,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger/internal/prefetcher"
+ "github.com/algorand/go-algorand/ledger/eval/prefetcher"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
diff --git a/ledger/internal/prefetcher/prefetcher_whitebox_test.go b/ledger/eval/prefetcher/prefetcher_whitebox_test.go
index 6a8738b48..6a8738b48 100644
--- a/ledger/internal/prefetcher/prefetcher_whitebox_test.go
+++ b/ledger/eval/prefetcher/prefetcher_whitebox_test.go
diff --git a/ledger/eval/txntracer.go b/ledger/eval/txntracer.go
new file mode 100644
index 000000000..036ad773d
--- /dev/null
+++ b/ledger/eval/txntracer.go
@@ -0,0 +1,185 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package eval
+
+import (
+ "fmt"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+)
+
+// TxnGroupDeltaWithIds associates all the Ids (group and Txn) with a single state delta object
+type TxnGroupDeltaWithIds struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+ Ids []string
+ Delta StateDeltaSubset
+}
+
+// StateDeltaSubset exports a subset of ledgercore.StateDelta fields for a sparse encoding
+type StateDeltaSubset struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+ Accts ledgercore.AccountDeltas
+ KvMods map[string]ledgercore.KvValueDelta
+ Txids map[transactions.Txid]ledgercore.IncludedTransactions
+ Txleases map[ledgercore.Txlease]basics.Round
+ Creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable
+ Hdr *bookkeeping.BlockHeader
+}
+
+func convertStateDelta(delta ledgercore.StateDelta) StateDeltaSubset {
+ // The StateDelta object returned through the EvalTracer has its values deleted between txn groups to avoid
+ // reallocation during evaluation.
+ // This means the map values need to be copied (to avoid deletion) since they are all passed by reference.
+ kvmods := make(map[string]ledgercore.KvValueDelta, len(delta.KvMods))
+ for k1, v1 := range delta.KvMods {
+ kvmods[k1] = v1
+ }
+ txids := make(map[transactions.Txid]ledgercore.IncludedTransactions, len(delta.Txids))
+ for k2, v2 := range delta.Txids {
+ txids[k2] = v2
+ }
+ txleases := make(map[ledgercore.Txlease]basics.Round, len(delta.Txleases))
+ for k3, v3 := range delta.Txleases {
+ txleases[k3] = v3
+ }
+ creatables := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable, len(delta.Creatables))
+ for k4, v4 := range delta.Creatables {
+ creatables[k4] = v4
+ }
+ var accR []ledgercore.BalanceRecord
+ var appR []ledgercore.AppResourceRecord
+ var assetR []ledgercore.AssetResourceRecord
+ if len(delta.Accts.Accts) > 0 {
+ accR = make([]ledgercore.BalanceRecord, len(delta.Accts.Accts))
+ copy(accR, delta.Accts.Accts)
+ }
+ if len(delta.Accts.AppResources) > 0 {
+ appR = make([]ledgercore.AppResourceRecord, len(delta.Accts.AppResources))
+ copy(appR, delta.Accts.AppResources)
+ }
+ if len(delta.Accts.AssetResources) > 0 {
+ assetR = make([]ledgercore.AssetResourceRecord, len(delta.Accts.AssetResources))
+ copy(assetR, delta.Accts.AssetResources)
+ }
+ return StateDeltaSubset{
+ Accts: ledgercore.AccountDeltas{
+ Accts: accR,
+ AppResources: appR,
+ AssetResources: assetR,
+ },
+ KvMods: kvmods,
+ Txids: txids,
+ Txleases: txleases,
+ Creatables: creatables,
+ Hdr: delta.Hdr,
+ }
+}
+
+// TxnGroupDeltaTracer collects groups of StateDelta objects covering groups of txns
+type TxnGroupDeltaTracer struct {
+ deltasLock deadlock.RWMutex
+ // lookback is the number of rounds stored at any given time
+ lookback uint64
+ // NullEvalTracer provides no-op implementations for the tracer hooks we don't use
+ logic.NullEvalTracer
+ // txnGroupDeltas stores the StateDeltaSubset objects for each round, indexed by all the IDs within the group
+ txnGroupDeltas map[basics.Round]map[crypto.Digest]*StateDeltaSubset
+ // latestRound is the most recent round seen via the BeforeBlock hdr
+ latestRound basics.Round
+}
+
+// MakeTxnGroupDeltaTracer creates a TxnGroupDeltaTracer
+func MakeTxnGroupDeltaTracer(lookback uint64) *TxnGroupDeltaTracer {
+ return &TxnGroupDeltaTracer{
+ lookback: lookback,
+ txnGroupDeltas: make(map[basics.Round]map[crypto.Digest]*StateDeltaSubset),
+ }
+}
+
+// BeforeBlock implements the EvalTracer interface for pre-block evaluation
+func (tracer *TxnGroupDeltaTracer) BeforeBlock(hdr *bookkeeping.BlockHeader) {
+ tracer.deltasLock.Lock()
+ defer tracer.deltasLock.Unlock()
+ // Drop older rounds based on the lookback parameter
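+ // Blocks are evaluated in round order, so evicting exactly hdr.Round-lookback each block bounds the map to lookback rounds.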
+ delete(tracer.txnGroupDeltas, hdr.Round-basics.Round(tracer.lookback))
+ tracer.latestRound = hdr.Round
+ // Initialize the delta map for the round
+ tracer.txnGroupDeltas[tracer.latestRound] = make(map[crypto.Digest]*StateDeltaSubset)
+}
+
+// AfterTxnGroup implements the EvalTracer interface for txn group boundaries
+func (tracer *TxnGroupDeltaTracer) AfterTxnGroup(ep *logic.EvalParams, deltas *ledgercore.StateDelta, evalError error) {
+ if deltas == nil {
+ return
+ }
+ deltaSub := convertStateDelta(*deltas)
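+ // deltaSub is stored once and shared: every ID in the group maps to the same pointer, which lets GetDeltasForRound dedupe by pointer.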
+ tracer.deltasLock.Lock()
+ defer tracer.deltasLock.Unlock()
+ txnDeltaMap := tracer.txnGroupDeltas[tracer.latestRound]
+ for _, txn := range ep.TxnGroup {
+ // Add Group ID
+ if !txn.Txn.Group.IsZero() {
+ txnDeltaMap[txn.Txn.Group] = &deltaSub
+ }
+ // Add Txn ID
+ txnDeltaMap[crypto.Digest(txn.ID())] = &deltaSub
+ }
+}
+
+// GetDeltasForRound returns all StateDelta objects for the txn groups executed in the given round
+func (tracer *TxnGroupDeltaTracer) GetDeltasForRound(rnd basics.Round) ([]TxnGroupDeltaWithIds, error) {
+ tracer.deltasLock.RLock()
+ defer tracer.deltasLock.RUnlock()
+ rndEntries, exists := tracer.txnGroupDeltas[rnd]
+ if !exists {
+ return nil, fmt.Errorf("round %d not found in txnGroupDeltaTracer", rnd)
+ }
+ // Dedupe the shared delta objects, collecting every ID that references each one
+ var deltas = map[*StateDeltaSubset][]string{}
+ for id, delta := range rndEntries {
+ deltas[delta] = append(deltas[delta], id.String())
+ }
+ var deltasForRound []TxnGroupDeltaWithIds
+ for delta, ids := range deltas {
+ deltasForRound = append(deltasForRound, TxnGroupDeltaWithIds{
+ Ids: ids,
+ Delta: *delta,
+ })
+ }
+ return deltasForRound, nil
+}
+
+// GetDeltaForID returns the StateDelta associated with the group of transactions executed for the supplied ID (txn or group)
+func (tracer *TxnGroupDeltaTracer) GetDeltaForID(id crypto.Digest) (StateDeltaSubset, error) {
+ tracer.deltasLock.RLock()
+ defer tracer.deltasLock.RUnlock()
+ for _, deltasForRound := range tracer.txnGroupDeltas {
+ if delta, exists := deltasForRound[id]; exists {
+ return *delta, nil
+ }
+ }
+ return StateDeltaSubset{}, fmt.Errorf("unable to find delta for id: %s", id)
+}
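+
+// Illustrative usage (a sketch, not part of this change): wiring the tracer
+// into block evaluation and querying deltas afterwards, mirroring the tests
+// below. The ledger value l and header hdr are assumed to come from the
+// surrounding setup.
+//
+//	tracer := MakeTxnGroupDeltaTracer(4) // retain 4 rounds of deltas
+//	eval, err := l.StartEvaluator(hdr, 0, 0, tracer)
+//	// ... eval.TransactionGroup(...) for each group ...
+//	delta, err := tracer.GetDeltaForID(crypto.Digest(txid))
+//	deltas, err := tracer.GetDeltasForRound(basics.Round(1))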
diff --git a/ledger/eval/txntracer_test.go b/ledger/eval/txntracer_test.go
new file mode 100644
index 000000000..4711709c6
--- /dev/null
+++ b/ledger/eval/txntracer_test.go
@@ -0,0 +1,372 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package eval
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ basics_testing "github.com/algorand/go-algorand/data/basics/testing"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/data/transactions/logic/mocktracer"
+ "github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ ledgertesting "github.com/algorand/go-algorand/ledger/testing"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestTransactionGroupWithDeltaTracer(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // In all cases, a group of three transactions is tested. They are:
+ // 1. A basic app call transaction
+ // 2. A payment transaction
+ // 3. An app call transaction that spawns inners. This is from the mocktracer scenarios.
+
+ // We don't care about testing error scenarios here--just exercising different successful txn group evals
+ scenario := mocktracer.GetTestScenarios()["none"]
+ type tracerTestCase struct {
+ name string
+ innerAppCallScenario mocktracer.TestScenarioGenerator
+ }
+ var testCases = []tracerTestCase{
+ {
+ name: "noError",
+ innerAppCallScenario: scenario,
+ },
+ }
+
+ for _, testCase := range testCases {
+ testCase := testCase
+ t.Run(testCase.name, func(t *testing.T) {
+ t.Parallel()
+ // SETUP THE BLOCK
+ genesisInitState, addrs, keys := ledgertesting.Genesis(4)
+
+ // newTestLedger uses ConsensusFuture, so we check it to find out if
+ // we should use 1001 as the initial resource ID.
+ protoVersion := protocol.ConsensusFuture
+ proto := config.Consensus[protoVersion]
+ offset := basics.AppIndex(0)
+ if proto.AppForbidLowResources {
+ offset += 1000
+ }
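+ // e.g. the first app created below gets ID 1001 rather than 1 when low resource IDs are forbidden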
+
+ innerAppID := basics.AppIndex(3) + offset
+ innerAppAddress := innerAppID.Address()
+ appID := basics.AppIndex(1) + offset
+ appAddress := appID.Address()
+ innerBoxAppID := basics.AppIndex(7) + offset
+ innerBoxAppAddress := innerBoxAppID.Address()
+ balances := genesisInitState.Accounts
+ balances[innerAppAddress] = basics_testing.MakeAccountData(basics.Offline, basics.MicroAlgos{Raw: 1_000_000})
+ balances[appAddress] = basics_testing.MakeAccountData(basics.Offline, basics.MicroAlgos{Raw: 1_000_000})
+
+ genesisBalances := bookkeeping.GenesisBalances{
+ Balances: genesisInitState.Accounts,
+ FeeSink: testSinkAddr,
+ RewardsPool: testPoolAddr,
+ Timestamp: 0,
+ }
+ l := newTestLedger(t, genesisBalances)
+ blkHeader, err := l.BlockHdr(basics.Round(0))
+ require.NoError(t, err)
+ newBlock := bookkeeping.MakeBlock(blkHeader)
+ tracer := MakeTxnGroupDeltaTracer(4)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, tracer)
+ require.NoError(t, err)
+ eval.validate = true
+ eval.generate = true
+ genHash := l.GenesisHash()
+
+ basicAppCallApproval := `#pragma version 8
+byte "hellobox"
+int 10
+box_create
+pop
+int 1`
+ basicAppCallClear := `#pragma version 8
+int 1`
+ basicAppCallClearOps, err := logic.AssembleString(basicAppCallClear)
+ require.NoError(t, err)
+ basicAppCallApprovalOps, err := logic.AssembleString(basicAppCallApproval)
+ require.NoError(t, err)
+ // a basic app call
+ basicAppCallTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ ApprovalProgram: basicAppCallApproval,
+ ClearStateProgram: basicAppCallClear,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round() + 1000,
+ Fee: minFee,
+ GenesisHash: genHash,
+ Note: []byte("one"),
+ Boxes: []transactions.BoxRef{{
+ Index: 0,
+ Name: []byte("hellobox"),
+ }},
+ }
+
+ // a non-app call txn
+ var txnLease [32]byte
+ copy(txnLease[:], "txnLeaseTest")
+ payTxn := txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: addrs[1],
+ Receiver: addrs[2],
+ CloseRemainderTo: addrs[3],
+ Amount: 1_000_000,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round() + 1000,
+ Fee: minFee,
+ GenesisHash: genHash,
+ Note: []byte("two"),
+ Lease: txnLease,
+ }
+ // an app call with inner txn
+ v6Clear := `#pragma version 6
+int 1`
+ v6ClearOps, err := logic.AssembleString(v6Clear)
+ require.NoError(t, err)
+ innerAppCallTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ ClearStateProgram: v6Clear,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round() + 1000,
+ Fee: minFee,
+ GenesisHash: genHash,
+ Note: []byte("three"),
+ }
+ scenario := testCase.innerAppCallScenario(mocktracer.TestScenarioInfo{
+ CallingTxn: innerAppCallTxn.Txn(),
+ MinFee: minFee,
+ CreatedAppID: innerAppID,
+ })
+ innerAppCallTxn.ApprovalProgram = scenario.Program
+ innerAppCallApprovalOps, err := logic.AssembleString(scenario.Program)
+ require.NoError(t, err)
+
+ // inner txn with more box mods
+ innerAppCallBoxApproval := `#pragma version 8
+byte "goodbyebox"
+int 10
+box_create
+pop
+byte "goodbyebox"
+int 0
+byte "2"
+box_replace
+byte "goodbyebox"
+box_del
+pop
+int 1`
+ innerAppCallBoxApprovalOps, err := logic.AssembleString(innerAppCallBoxApproval)
+ require.NoError(t, err)
+ innerAppCallBoxTxn := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ ApprovalProgram: innerAppCallBoxApproval,
+ ClearStateProgram: basicAppCallClear,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round() + 1000,
+ Fee: minFee,
+ GenesisHash: genHash,
+ Boxes: []transactions.BoxRef{{
+ Index: 0,
+ Name: []byte("goodbyebox"),
+ }},
+ }
+
+ txntest.Group(&basicAppCallTxn, &payTxn, &innerAppCallTxn, &innerAppCallBoxTxn)
+
+ txgroup := transactions.WrapSignedTxnsWithAD([]transactions.SignedTxn{
+ basicAppCallTxn.Txn().Sign(keys[0]),
+ payTxn.Txn().Sign(keys[1]),
+ innerAppCallTxn.Txn().Sign(keys[0]),
+ innerAppCallBoxTxn.Txn().Sign(keys[0]),
+ })
+
+ require.Len(t, eval.block.Payset, 0)
+
+ err = eval.TransactionGroup(txgroup)
+ require.NoError(t, err)
+ require.Len(t, eval.block.Payset, 4)
+
+ secondPayTxn := txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: addrs[2],
+ Receiver: addrs[1],
+ Amount: 100_000,
+ FirstValid: newBlock.Round(),
+ LastValid: newBlock.Round() + 1000,
+ Fee: minFee,
+ GenesisHash: genHash,
+ }
+ secondTxGroup := transactions.WrapSignedTxnsWithAD([]transactions.SignedTxn{
+ secondPayTxn.Txn().Sign(keys[2]),
+ })
+ err = eval.TransactionGroup(secondTxGroup)
+ require.NoError(t, err)
+
+ expectedAccts := ledgercore.AccountDeltas{
+ Accts: []ledgercore.BalanceRecord{
+ {
+ Addr: addrs[0],
+ AccountData: ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 1666666666663666},
+ TotalAppParams: 3,
+ },
+ },
+ },
+ {
+ Addr: testSinkAddr,
+ AccountData: ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{
+ Status: basics.Status(2),
+ MicroAlgos: basics.MicroAlgos{Raw: 1666666666673666},
+ },
+ },
+ },
+ {
+ Addr: appAddress,
+ AccountData: ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 1000000},
+ TotalBoxes: 1,
+ TotalBoxBytes: 18,
+ },
+ },
+ },
+ {
+ Addr: addrs[1],
+ },
+ {
+ Addr: addrs[2],
+ AccountData: ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 1666666667666666},
+ },
+ },
+ },
+ {
+ Addr: addrs[3],
+ AccountData: ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 3333333332332332},
+ },
+ },
+ },
+ {
+ Addr: innerAppAddress,
+ AccountData: ledgercore.AccountData{
+ AccountBaseData: ledgercore.AccountBaseData{
+ MicroAlgos: basics.MicroAlgos{Raw: 997000},
+ TotalAppParams: 1,
+ },
+ },
+ },
+ {
+ Addr: innerBoxAppAddress,
+ AccountData: ledgercore.AccountData{},
+ },
+ },
+ AppResources: []ledgercore.AppResourceRecord{
+ {
+ Aidx: 1 + offset,
+ Addr: addrs[0],
+ Params: ledgercore.AppParamsDelta{
+ Params: &basics.AppParams{
+ ApprovalProgram: basicAppCallApprovalOps.Program,
+ ClearStateProgram: basicAppCallClearOps.Program,
+ },
+ },
+ },
+ {
+ Aidx: 3 + offset,
+ Addr: addrs[0],
+ Params: ledgercore.AppParamsDelta{
+ Params: &basics.AppParams{
+ ApprovalProgram: innerAppCallApprovalOps.Program,
+ ClearStateProgram: v6ClearOps.Program,
+ },
+ },
+ },
+ {
+ Aidx: 4 + offset,
+ Addr: innerAppAddress,
+ Params: ledgercore.AppParamsDelta{
+ Params: &basics.AppParams{
+ ApprovalProgram: []byte{0x06, 0x80, 0x01, 0x78, 0xb0, 0x81, 0x01}, // #pragma version 6; pushbytes "x"; log; pushint 1
+ ClearStateProgram: v6ClearOps.Program,
+ },
+ },
+ },
+ {
+ Aidx: innerBoxAppID,
+ Addr: addrs[0],
+ Params: ledgercore.AppParamsDelta{
+ Params: &basics.AppParams{
+ ApprovalProgram: innerAppCallBoxApprovalOps.Program,
+ ClearStateProgram: basicAppCallClearOps.Program,
+ },
+ },
+ },
+ },
+ }
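+ // Box kv keys are "bx:" + the 8-byte big-endian app ID + the box name (0x3e9 = 1001 = appID, 0x3ef = 1007 = innerBoxAppID).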
+ expectedKvMods := map[string]ledgercore.KvValueDelta{
+ "bx:\x00\x00\x00\x00\x00\x00\x03\xe9hellobox": {
+ OldData: []uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ Data: []uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ },
+ "bx:\x00\x00\x00\x00\x00\x00\x03\xefgoodbyebox": {
+ OldData: nil,
+ Data: nil,
+ },
+ }
+ expectedLeases := map[ledgercore.Txlease]basics.Round{
+ {Sender: payTxn.Sender, Lease: payTxn.Lease}: payTxn.LastValid,
+ }
+
+ actualDelta, err := tracer.GetDeltaForID(crypto.Digest(txgroup[0].ID()))
+ require.NoError(t, err)
+ _, err = tracer.GetDeltaForID(crypto.Digest(txgroup[1].ID()))
+ require.NoError(t, err)
+ _, err = tracer.GetDeltaForID(crypto.Digest(txgroup[2].ID()))
+ require.NoError(t, err)
+ allDeltas, err := tracer.GetDeltasForRound(basics.Round(1))
+ require.NoError(t, err)
+ require.Len(t, allDeltas, 2)
+
+ require.Equal(t, expectedAccts.Accts, actualDelta.Accts.Accts)
+ require.Equal(t, expectedAccts.AppResources, actualDelta.Accts.AppResources)
+ require.Equal(t, expectedAccts.AssetResources, actualDelta.Accts.AssetResources)
+ require.Equal(t, expectedKvMods, actualDelta.KvMods)
+ require.Equal(t, expectedLeases, actualDelta.Txleases)
+ })
+ }
+}
diff --git a/ledger/eval_simple_test.go b/ledger/eval_simple_test.go
index c5c935452..606ee4931 100644
--- a/ledger/eval_simple_test.go
+++ b/ledger/eval_simple_test.go
@@ -18,6 +18,7 @@ package ledger
import (
"context"
+ "encoding/binary"
"fmt"
"reflect"
"strings"
@@ -52,7 +53,7 @@ func TestBlockEvaluator(t *testing.T) {
genesisBlockHeader, err := l.BlockHdr(basics.Round(0))
require.NoError(t, err)
newBlock := bookkeeping.MakeBlock(genesisBlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil)
require.NoError(t, err)
genHash := l.GenesisHash()
@@ -210,18 +211,196 @@ func TestBlockEvaluator(t *testing.T) {
require.Equal(t, bal2new.MicroAlgos.Raw, bal2.MicroAlgos.Raw-minFee.Raw)
}
-func TestRekeying(t *testing.T) {
+// TestHoldingGet tests some of the corner cases for the asset_holding_get
+// opcode: the asset doesn't exist, the account doesn't exist, the account is
+// not opted in, versus opted in but holding none of the asset. This is tested
+// here, even though it should be well tested in the 'logic' package, because
+// we want to make sure that errors come out of the real ledger in the way
+// that the logic package expects (it uses a mock ledger for testing).
+func TestHoldingGet(t *testing.T) {
partitiontest.PartitionTest(t)
- // t.Parallel() NO! This test manipulates []protocol.Consensus
+ t.Parallel()
+
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // 24 is first version with apps
+ ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ makegold := txntest.Txn{
+ Type: protocol.AssetConfigTx,
+ Sender: addrs[0],
+ AssetParams: basics.AssetParams{
+ Total: 10,
+ UnitName: "gold",
+ AssetName: "oz",
+ },
+ }
+
+ // written without assert or swap, so we can use teal v2 and test back to consensus v24
+ source := `
+#pragma version 2
+txn ApplicationID
+bnz main
+int 1; return
+main:
+ txn NumAccounts // Sender, or Accounts[n]
+ txn ApplicationArgs 0; btoi
+ asset_holding_get AssetBalance
+ txn ApplicationArgs 1; btoi; ==; bz bad
+ txn ApplicationArgs 2; btoi; ==; return
+bad: err
+`
+
+ // Advance the ledger so that there's ambiguity of asset index or foreign array index
+ for i := 0; i < 10; i++ {
+ dl.fullBlock(&txntest.Txn{Type: "pay", Sender: addrs[2], Receiver: addrs[2]})
+ }
+
+ create := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ ApprovalProgram: source,
+ }
+
+ vb := dl.fullBlock(&create) // create the app
+ checker := basics.AppIndex(vb.Block().TxnCounter)
+ gold := basics.AssetIndex(checker + 2) // doesn't exist yet
+ goldBytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(goldBytes, uint64(gold))
+
+ check := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ ApplicationID: checker,
+ ApplicationArgs: [][]byte{goldBytes, {0}, {0}}, // exist=0 value=0
+ }
+
+ dl.fullBlock(&check)
+ vb = dl.fullBlock(&makegold) // Works, despite asset not existing
+ require.EqualValues(t, gold, vb.Block().TxnCounter)
+
+ // confirm hardcoded "gold" is correct
+ b, ok := holding(t, dl.generator, addrs[0], gold)
+ require.True(t, ok)
+ require.EqualValues(t, 10, b)
+
+ // The asset exists now. asset_holding_get gives 1,10 for the creator
+ // (who is auto-opted in)
+ check.ApplicationArgs = [][]byte{goldBytes, {1}, {10}} // exist=1 value=10
+ dl.fullBlock(&check)
+
+ // but still gives 0,0 for un opted-in addrs[1], because it means
+ // "exists" in the given account, i.e. opted in
+ check.Sender = addrs[1]
+ check.ApplicationArgs = [][]byte{goldBytes, {0}, {0}}
+ dl.fullBlock(&check)
+
+ // opt-in addr[1]
+ dl.fullBlock(&txntest.Txn{Type: "axfer", XferAsset: gold, Sender: addrs[1], AssetReceiver: addrs[1]})
+ check.ApplicationArgs = [][]byte{goldBytes, {1}, {0}}
+ dl.fullBlock(&check)
+
+ // non-existent account, with existing asset, cleanly reports exists=0, value=0
+ check.Accounts = []basics.Address{{0x01, 0x02}}
+ check.ApplicationArgs = [][]byte{goldBytes, {0}, {0}}
+ dl.fullBlock(&check)
+ })
+}
+
+// TestLocalGetEx tests some of the corner cases for the app_local_get_ex
+// opcode: the app doesn't exist, the account doesn't exist, the account is
+// not opted in, or the local key doesn't exist. This is tested here, even
+// though it should be well tested in the 'logic' package, because we want to make sure that errors come
+// out of the real ledger in the way that the logic package expects (it uses a
+// mock ledger for testing).
+func TestLocalGetEx(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
- // Pretend rekeying is supported
- actual := config.Consensus[protocol.ConsensusCurrentVersion]
- pretend := actual
- pretend.SupportRekeying = true
- config.Consensus[protocol.ConsensusCurrentVersion] = pretend
- defer func() {
- config.Consensus[protocol.ConsensusCurrentVersion] = actual
- }()
+ genBalances, addrs, _ := ledgertesting.NewTestGenesis()
+ // 24 is first version with apps
+ ledgertesting.TestConsensusRange(t, 24, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) {
+ dl := NewDoubleLedger(t, genBalances, cv, cfg)
+ defer dl.Close()
+
+ makeapp := txntest.Txn{
+ Type: "appl",
+ Sender: addrs[0],
+ LocalStateSchema: basics.StateSchema{
+ NumUint: 1,
+ },
+ GlobalStateSchema: basics.StateSchema{
+ NumByteSlice: 3,
+ },
+ }
+
+ // written without assert or swap, so we can use teal v2 and test back to consensus v24
+ source := `
+#pragma version 2
+txn ApplicationID
+bnz main
+int 1; return
+main:
+ txn NumAccounts // Sender, or Accounts[n]
+ txn ApplicationArgs 0; btoi
+ byte "KEY"
+ app_local_get_ex
+ txn ApplicationArgs 1; btoi; ==; bz bad
+ txn ApplicationArgs 2; btoi; ==; return
+bad: err
+`
+
+ // Advance the ledger so that there's no ambiguity of app ID or foreign array slot
+ for i := 0; i < 10; i++ {
+ dl.fullBlock(&txntest.Txn{Type: "pay", Sender: addrs[2], Receiver: addrs[2]})
+ }
+
+ create := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ ApprovalProgram: source,
+ }
+
+ vb := dl.fullBlock(&create) // create the checker app
+ // Since we are testing back to v24, we can't get appID from EvalDelta
+ checker := basics.AppIndex(vb.Block().TxnCounter)
+ state := checker + 1 // doesn't exist yet
+ stateBytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(stateBytes, uint64(state))
+ check := txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: addrs[0],
+ ApplicationID: checker,
+ ApplicationArgs: [][]byte{stateBytes, {0}, {0}}, // exist=0 value=0
+ }
+
+ // unlike assets, you can't even do `app_local_get_ex` for an address
+ // that has not been opted into the app. For local state, the existence
+ // bit is only used to distinguish "key existence". The local state
+ // bundle MUST exist or the program fails.
+ dl.txn(&check, "cannot fetch key")
+
+ // so we make the app and try again
+ dl.fullBlock(&makeapp)
+ // confirm hardcoded "state" index is correct
+ g, ok := globals(t, dl.generator, addrs[0], state)
+ require.True(t, ok)
+ require.EqualValues(t, 3, g.GlobalStateSchema.NumByteSlice)
+
+ // still no good, because creating an app does not opt in the creator
+ dl.txn(&check, "cannot fetch key")
+
+ // opt-in addr[0]
+ dl.fullBlock(&txntest.Txn{Type: "appl", ApplicationID: state, Sender: addrs[0], OnCompletion: transactions.OptInOC})
+ check.ApplicationArgs = [][]byte{stateBytes, {0}, {0}}
+ dl.fullBlock(&check)
+ })
+}
+
+func TestRekeying(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
// Bring up a ledger
genesisInitState, addrs, keys := ledgertesting.Genesis(10)
@@ -263,7 +442,7 @@ func TestRekeying(t *testing.T) {
genesisHdr, err := l.BlockHdr(basics.Round(0))
require.NoError(t, err)
newBlock := bookkeeping.MakeBlock(genesisHdr)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil)
require.NoError(t, err)
for _, stxn := range stxns {
@@ -420,7 +599,7 @@ func TestMinBalanceChanges(t *testing.T) {
},
}
- const expectedID basics.AssetIndex = 1
+ const expectedID basics.AssetIndex = 1001
optInTxn := txntest.Txn{
Type: "axfer",
Sender: addrs[5],
@@ -487,7 +666,7 @@ func TestAppInsMinBalance(t *testing.T) {
l := newSimpleLedgerWithConsensusVersion(t, genBalances, protocol.ConsensusV30, cfg)
defer l.Close()
- const appid basics.AppIndex = 1
+ const appID basics.AppIndex = 1
maxAppsOptedIn := config.Consensus[protocol.ConsensusV30].MaxAppsOptedIn
require.Greater(t, maxAppsOptedIn, 0)
@@ -521,7 +700,7 @@ func TestAppInsMinBalance(t *testing.T) {
optInTxn := txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: addrs[9],
- ApplicationID: appid + basics.AppIndex(i),
+ ApplicationID: appID + basics.AppIndex(i),
OnCompletion: transactions.OptInOC,
}
txnsOptIn = append(txnsOptIn, &optInTxn)
diff --git a/ledger/evalbench_test.go b/ledger/evalbench_test.go
index 9b827d83f..513449727 100644
--- a/ledger/evalbench_test.go
+++ b/ledger/evalbench_test.go
@@ -37,7 +37,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
- "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
"github.com/algorand/go-algorand/logging"
@@ -491,7 +491,7 @@ func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool, proto pr
if withCrypto {
_, err = l2.Validate(context.Background(), validatedBlock.Block(), backlogPool)
} else {
- _, err = internal.Eval(context.Background(), l2, validatedBlock.Block(), false, nil, nil)
+ _, err = eval.Eval(context.Background(), l2, validatedBlock.Block(), false, nil, nil, l2.tracer)
}
require.NoError(b, err)
}
@@ -505,7 +505,7 @@ func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool, proto pr
func benchmarkPreparePaymentTransactionsTesting(b *testing.B, numTxns int, txnSource BenchTxnGenerator, genesisInitState ledgercore.InitState, addrs []basics.Address, keys []*crypto.SignatureSecrets, l, l2 *Ledger) *ledgercore.ValidatedBlock {
newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- bev, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ bev, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil)
require.NoError(b, err)
genHash := l.GenesisHash()
@@ -529,7 +529,7 @@ func benchmarkPreparePaymentTransactionsTesting(b *testing.B, numTxns int, txnSo
require.NoError(b, err)
}
newBlock = bookkeeping.MakeBlock(validatedBlock.Block().BlockHeader)
- bev, err = l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ bev, err = l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil)
require.NoError(b, err)
numBlocks++
}
@@ -550,7 +550,7 @@ func benchmarkPreparePaymentTransactionsTesting(b *testing.B, numTxns int, txnSo
wg.Wait()
newBlock = bookkeeping.MakeBlock(validatedBlock.Block().BlockHeader)
- bev, err = l.StartEvaluator(newBlock.BlockHeader, 0, 0)
+ bev, err = l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil)
require.NoError(b, err)
}
diff --git a/ledger/evalindexer.go b/ledger/evalindexer.go
index a575c638b..5ac28ce81 100644
--- a/ledger/evalindexer.go
+++ b/ledger/evalindexer.go
@@ -25,7 +25,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
)
@@ -215,6 +215,11 @@ func (l indexerLedgerConnector) VotersForStateProof(_ basics.Round) (*ledgercore
return nil, errors.New("VotersForStateProof() not implemented")
}
+// GetStateProofVerificationContext is part of LedgerForEvaluator interface.
+func (l indexerLedgerConnector) GetStateProofVerificationContext(_ basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ return nil, errors.New("GetStateProofVerificationContext() not implemented")
+}
+
func makeIndexerLedgerConnector(il indexerLedgerForEval, genesisHash crypto.Digest, genesisProto config.ConsensusParams, latestRound basics.Round, roundResources EvalForIndexerResources) indexerLedgerConnector {
return indexerLedgerConnector{
il: il,
@@ -233,9 +238,9 @@ func makeIndexerLedgerConnector(il indexerLedgerForEval, genesisHash crypto.Dige
func EvalForIndexer(il indexerLedgerForEval, block *bookkeeping.Block, proto config.ConsensusParams, resources EvalForIndexerResources) (ledgercore.StateDelta, []transactions.SignedTxnInBlock, error) {
ilc := makeIndexerLedgerConnector(il, block.GenesisHash(), proto, block.Round()-1, resources)
- eval, err := internal.StartEvaluator(
+ eval, err := eval.StartEvaluator(
ilc, block.BlockHeader,
- internal.EvaluatorOptions{
+ eval.EvaluatorOptions{
PaysetHint: len(block.Payset),
ProtoParams: &proto,
Generate: false,
diff --git a/ledger/fullblock_perf_test.go b/ledger/fullblock_perf_test.go
index 9bea4a00a..cad634a5a 100644
--- a/ledger/fullblock_perf_test.go
+++ b/ledger/fullblock_perf_test.go
@@ -37,7 +37,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
- "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
@@ -53,7 +53,7 @@ type benchConfig struct {
acctToApp map[basics.Address]map[basics.AppIndex]struct{}
l0 *Ledger
l1 *Ledger
- eval *internal.BlockEvaluator
+ eval *eval.BlockEvaluator
numPay uint64
numAst uint64
numApp uint64
@@ -119,7 +119,7 @@ func setupEnv(b *testing.B, numAccts int) (bc *benchConfig) {
require.NoError(b, err)
newBlk := bookkeeping.MakeBlock(blk.BlockHeader)
- eval, err := l0.StartEvaluator(newBlk.BlockHeader, 5000, 0)
+ blockEvaluator, err := l0.StartEvaluator(newBlk.BlockHeader, 5000, 0, nil)
require.NoError(b, err)
bc = &benchConfig{
@@ -132,7 +132,7 @@ func setupEnv(b *testing.B, numAccts int) (bc *benchConfig) {
acctToApp: acctToApp,
l0: l0,
l1: l1,
- eval: eval,
+ eval: blockEvaluator,
}
// start the ledger with a pool of accounts
@@ -144,7 +144,7 @@ func setupEnv(b *testing.B, numAccts int) (bc *benchConfig) {
addBlock(bc)
vc := verify.GetMockedCache(true)
for _, blk := range bc.blocks {
- _, err := internal.Eval(context.Background(), bc.l1, blk, true, vc, nil)
+ _, err := eval.Eval(context.Background(), bc.l1, blk, true, vc, nil, bc.l1.tracer)
require.NoError(b, err)
err = bc.l1.AddBlock(blk, cert)
require.NoError(b, err)
@@ -326,7 +326,7 @@ func addBlock(bc *benchConfig) {
prev, err := bc.l0.BlockHdr(basics.Round(last))
require.NoError(bc.b, err)
newBlk := bookkeeping.MakeBlock(prev)
- bc.eval, err = bc.l0.StartEvaluator(newBlk.BlockHeader, 5000, 0)
+ bc.eval, err = bc.l0.StartEvaluator(newBlk.BlockHeader, 5000, 0, nil)
bc.round++
require.NoError(bc.b, err)
}
@@ -424,7 +424,7 @@ func benchmarkBlockValidationMix(b *testing.B, newAcctProb, payProb, astProb flo
tt := time.Now()
b.ResetTimer()
for _, blk := range bc.blocks {
- _, err := internal.Eval(context.Background(), bc.l1, blk, true, vc, nil)
+ _, err := eval.Eval(context.Background(), bc.l1, blk, true, vc, nil, bc.l1.tracer)
require.NoError(b, err)
err = bc.l1.AddBlock(blk, cert)
require.NoError(b, err)
diff --git a/ledger/ledger.go b/ledger/ledger.go
index 21547e30b..0c7747087 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -31,9 +31,10 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
"github.com/algorand/go-algorand/ledger/apply"
- "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store/blockdb"
"github.com/algorand/go-algorand/ledger/store/trackerdb"
@@ -78,13 +79,14 @@ type Ledger struct {
genesisProtoVersion protocol.ConsensusVersion
// State-machine trackers
- accts accountUpdates
- acctsOnline onlineAccounts
- catchpoint catchpointTracker
- txTail txTail
- bulletin bulletin
- notifier blockNotifier
- metrics metricsTracker
+ accts accountUpdates
+ acctsOnline onlineAccounts
+ catchpoint catchpointTracker
+ txTail txTail
+ bulletin bulletin
+ notifier blockNotifier
+ metrics metricsTracker
+ spVerification spVerificationTracker
trackers trackerRegistry
trackerMu deadlock.RWMutex
@@ -97,6 +99,8 @@ type Ledger struct {
cfg config.Local
dbPathPrefix string
+
+ tracer logic.EvalTracer
}
// OpenLedger creates a Ledger object, using SQLite database filenames
@@ -112,6 +116,10 @@ func OpenLedger(
verifiedCacheSize = cfg.TxPoolSize
log.Warnf("The VerifiedTranscationsCacheSize in the config file was misconfigured to have smaller size then the TxPoolSize; The verified cache size was adjusted from %d to %d.", cfg.VerifiedTranscationsCacheSize, cfg.TxPoolSize)
}
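+ // When EnableTxnEvalTracer is set, attach a TxnGroupDeltaTracer that retains MaxAcctLookback rounds of txn group deltas.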
+ var tracer logic.EvalTracer
+ if cfg.EnableTxnEvalTracer {
+ tracer = eval.MakeTxnGroupDeltaTracer(cfg.MaxAcctLookback)
+ }
l := &Ledger{
log: log,
@@ -125,6 +133,7 @@ func OpenLedger(
verifiedTxnCache: verify.MakeVerifiedTransactionCache(verifiedCacheSize),
cfg: cfg,
dbPathPrefix: dbPathPrefix,
+ tracer: tracer,
}
l.headerCache.initialize()
@@ -205,13 +214,14 @@ func (l *Ledger) reloadLedger() error {
// set account updates tracker as a driver to calculate tracker db round and committing offsets
trackers := []ledgerTracker{
- &l.accts, // update the balances
- &l.catchpoint, // catchpoints tracker : update catchpoint labels, create catchpoint files
- &l.acctsOnline, // update online account balances history
- &l.txTail, // update the transaction tail, tracking the recent 1000 txn
- &l.bulletin, // provide closed channel signaling support for completed rounds
- &l.notifier, // send OnNewBlocks to subscribers
- &l.metrics, // provides metrics reporting support
+ &l.accts, // update the balances
+ &l.catchpoint, // catchpoints tracker : update catchpoint labels, create catchpoint files
+ &l.acctsOnline, // update online account balances history
+ &l.txTail, // update the transaction tail, tracking the recent 1000 txn
+ &l.bulletin, // provide closed channel signaling support for completed rounds
+ &l.notifier, // send OnNewBlocks to subscribers
+ &l.metrics, // provides metrics reporting support
+ &l.spVerification, // provides state proof verification support
}
l.accts.initialize(l.cfg)
@@ -401,6 +411,21 @@ func (l *Ledger) RegisterBlockListeners(listeners []ledgercore.BlockListener) {
l.notifier.register(listeners)
}
+// RegisterVotersCommitListener registers a listener that will be called when a
+// commit is about to cover a round.
+func (l *Ledger) RegisterVotersCommitListener(listener ledgercore.VotersCommitListener) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+ l.acctsOnline.voters.registerPrepareCommitListener(listener)
+}
+
+// UnregisterVotersCommitListener unregisters the commit listener.
+func (l *Ledger) UnregisterVotersCommitListener() {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+ l.acctsOnline.voters.unregisterPrepareCommitListener()
+}
+
// notifyCommit informs the trackers that all blocks up to r have been
// written to disk. Returns the minimum block number that must be kept
// in the database.
@@ -449,13 +474,26 @@ func (l *Ledger) GetStateDeltaForRound(rnd basics.Round) (ledgercore.StateDelta,
return l.accts.lookupStateDelta(rnd)
}
+// GetTracer returns the logic.EvalTracer attached to the ledger--can be nil.
+func (l *Ledger) GetTracer() logic.EvalTracer {
+ return l.tracer
+}
+
// VotersForStateProof returns the top online accounts at round rnd.
// The result might be nil, even with err=nil, if there are no voters
// for that round because state proofs were not enabled.
func (l *Ledger) VotersForStateProof(rnd basics.Round) (*ledgercore.VotersForRound, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.acctsOnline.voters.getVoters(rnd)
+ return l.acctsOnline.voters.VotersForStateProof(rnd)
+}
+
+// GetStateProofVerificationContext returns the data required to verify the state proof whose last attested round is
+// stateProofLastAttestedRound.
+func (l *Ledger) GetStateProofVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+ return l.spVerification.LookupVerificationContext(stateProofLastAttestedRound)
}
// ListAssets takes a maximum asset index and maximum result length, and
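
GetStateProofVerificationContext resolves, under the tracker read lock, to a lookup keyed by the state proof's last attested round. A toy sketch of that path; the fields are illustrative stand-ins for ledgercore.StateProofVerificationContext:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // verificationContext is a toy stand-in for ledgercore.StateProofVerificationContext:
    // the data needed to verify a state proof whose last attested round is LastAttestedRound.
    type verificationContext struct {
    	LastAttestedRound uint64
    	VotersCommitment  []byte
    	OnlineTotalWeight uint64
    }

    // spVerificationTracker models the lookup path used by
    // GetStateProofVerificationContext above: a read-locked map keyed by the
    // state proof's last attested round.
    type spVerificationTracker struct {
    	mu       sync.RWMutex
    	contexts map[uint64]*verificationContext
    }

    func (t *spVerificationTracker) lookupVerificationContext(lastAttested uint64) (*verificationContext, error) {
    	t.mu.RLock()
    	defer t.mu.RUnlock()
    	ctx, ok := t.contexts[lastAttested]
    	if !ok {
    		return nil, fmt.Errorf("no verification context for last attested round %d", lastAttested)
    	}
    	return ctx, nil
    }

    func main() {
    	trk := spVerificationTracker{contexts: map[uint64]*verificationContext{
    		512: {LastAttestedRound: 512, VotersCommitment: []byte{0x01}, OnlineTotalWeight: 1_000_000},
    	}}
    	ctx, err := trk.lookupVerificationContext(512)
    	fmt.Println(ctx.OnlineTotalWeight, err)
    }
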
@@ -592,11 +630,12 @@ func (l *Ledger) LatestTotals() (basics.Round, ledgercore.AccountTotals, error)
return l.accts.LatestTotals()
}
-// OnlineTotals returns the online totals of all accounts at the end of round rnd.
-func (l *Ledger) OnlineTotals(rnd basics.Round) (basics.MicroAlgos, error) {
+// OnlineCirculation returns the online totals of all accounts at the end of round rnd.
+// It implements agreement's call for Circulation(rnd).
+func (l *Ledger) OnlineCirculation(rnd basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.acctsOnline.onlineTotals(rnd)
+ return l.acctsOnline.onlineCirculation(rnd, voteRnd)
}
// CheckDup returns whether a transaction is a duplicate one.
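
OnlineCirculation replaces OnlineTotals and takes the voting round alongside the totals round, which lets the tracker report the stake relevant to the round in which the vote actually happens. A toy model of such a two-round lookup; the expiration adjustment below is an assumption of this sketch, not something shown in the diff:

    package main

    import "fmt"

    // microAlgos is a simplified stand-in for basics.MicroAlgos.
    type microAlgos uint64

    // onlineAccounts is a toy model of the online-accounts tracker: per-round
    // online totals plus stake assumed to have expired by a given voting round.
    type onlineAccounts struct {
    	totals  map[uint64]microAlgos // round -> total online stake
    	expired map[uint64]microAlgos // voteRnd -> stake expired by that round (sketch assumption)
    }

    func (oa *onlineAccounts) onlineCirculation(rnd, voteRnd uint64) (microAlgos, error) {
    	total, ok := oa.totals[rnd]
    	if !ok {
    		return 0, fmt.Errorf("no online totals for round %d", rnd)
    	}
    	return total - oa.expired[voteRnd], nil
    }

    func main() {
    	oa := onlineAccounts{
    		totals:  map[uint64]microAlgos{1000: 5_000_000},
    		expired: map[uint64]microAlgos{1320: 250_000},
    	}
    	circ, err := oa.onlineCirculation(1000, 1320)
    	fmt.Println(circ, err) // 4750000 <nil>
    }
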
@@ -655,7 +694,7 @@ func (l *Ledger) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreem
func (l *Ledger) AddBlock(blk bookkeeping.Block, cert agreement.Certificate) error {
// passing nil as the executionPool is ok since we're asking the evaluator to skip verification.
- updates, err := internal.Eval(context.Background(), l, blk, false, l.verifiedTxnCache, nil)
+ updates, err := eval.Eval(context.Background(), l, blk, false, l.verifiedTxnCache, nil, l.tracer)
if err != nil {
if errNSBE, ok := err.(ledgercore.ErrNonSequentialBlockEval); ok && errNSBE.EvaluatorRound <= errNSBE.LatestRound {
return ledgercore.BlockInLedgerError{
@@ -782,9 +821,9 @@ func (l *Ledger) trackerLog() logging.Logger {
// trackerEvalVerified is used by the accountUpdates to reconstruct the ledgercore.StateDelta from a given block during its loadFromDisk execution.
// when this function is called, the trackers mutex is expected already to be taken. The provided accUpdatesLedger would allow the
// evaluator to shortcut the "main" ledger ( i.e. this struct ) and avoid taking the trackers lock a second time.
-func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger internal.LedgerForEvaluator) (ledgercore.StateDelta, error) {
+func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger eval.LedgerForEvaluator) (ledgercore.StateDelta, error) {
// passing nil as the executionPool is ok since we're asking the evaluator to skip verification.
- return internal.Eval(context.Background(), accUpdatesLedger, blk, false, l.verifiedTxnCache, nil)
+ return eval.Eval(context.Background(), accUpdatesLedger, blk, false, l.verifiedTxnCache, nil, l.tracer)
}
// IsWritingCatchpointDataFile returns true when a catchpoint file is being generated.
@@ -793,7 +832,7 @@ func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger int
func (l *Ledger) IsWritingCatchpointDataFile() bool {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- return l.catchpoint.IsWritingCatchpointDataFile()
+ return l.catchpoint.isWritingCatchpointDataFile()
}
// VerifiedTransactionCache returns the verify.VerifiedTransactionCache
@@ -808,13 +847,16 @@ func (l *Ledger) VerifiedTransactionCache() verify.VerifiedTransactionCache {
// provides a cap on the size of a single generated block, when a non-zero value is passed.
// If a value of zero or less is passed to maxTxnBytesPerBlock, the consensus MaxTxnBytesPerBlock would
// be used instead.
-func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnBytesPerBlock int) (*internal.BlockEvaluator, error) {
- return internal.StartEvaluator(l, hdr,
- internal.EvaluatorOptions{
+// The tracer argument is a logic.EvalTracer which will be attached to the evaluator and have its hooks invoked during
+// the eval process for each block. A nil tracer will skip tracer invocation entirely.
+func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnBytesPerBlock int, tracer logic.EvalTracer) (*eval.BlockEvaluator, error) {
+ return eval.StartEvaluator(l, hdr,
+ eval.EvaluatorOptions{
PaysetHint: paysetHint,
Generate: true,
Validate: true,
MaxTxnBytesPerBlock: maxTxnBytesPerBlock,
+ Tracer: tracer,
})
}
@@ -828,7 +870,7 @@ func (l *Ledger) FlushCaches() {
// not a valid block (e.g., it has duplicate transactions, overspends some
// account, etc).
func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledgercore.ValidatedBlock, error) {
- delta, err := internal.Eval(ctx, l, blk, true, l.verifiedTxnCache, executionPool)
+ delta, err := eval.Eval(ctx, l, blk, true, l.verifiedTxnCache, executionPool, l.tracer)
if err != nil {
return nil, err
}
@@ -843,11 +885,11 @@ func (l *Ledger) LatestTrackerCommitted() basics.Round {
}
// DebuggerLedger defines the minimal set of method required for creating a debug balances.
-type DebuggerLedger = internal.LedgerForCowBase
+type DebuggerLedger = eval.LedgerForCowBase
// MakeDebugBalances creates a ledger suitable for dryrun and debugger
func MakeDebugBalances(l DebuggerLedger, round basics.Round, proto protocol.ConsensusVersion, prevTimestamp int64) apply.Balances {
- return internal.MakeDebugBalances(l, round, proto, prevTimestamp)
+ return eval.MakeDebugBalances(l, round, proto, prevTimestamp)
}
var ledgerInitblocksdbCount = metrics.NewCounter("ledger_initblocksdb_count", "calls")
diff --git a/ledger/ledger_perf_test.go b/ledger/ledger_perf_test.go
index 2c5cb8ec5..ba76df5e7 100644
--- a/ledger/ledger_perf_test.go
+++ b/ledger/ledger_perf_test.go
@@ -36,7 +36,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
- "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
@@ -205,7 +205,7 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
prev, err := l0.BlockHdr(basics.Round(i))
require.NoError(b, err)
newBlk := bookkeeping.MakeBlock(prev)
- eval, err := l0.StartEvaluator(newBlk.BlockHeader, 5000, 0)
+ eval, err := l0.StartEvaluator(newBlk.BlockHeader, 5000, 0, nil)
require.NoError(b, err)
// build a payset
@@ -319,7 +319,7 @@ func benchmarkFullBlocks(params testParams, b *testing.B) {
vc := verify.GetMockedCache(true)
b.ResetTimer()
for _, blk := range blocks {
- _, err = internal.Eval(context.Background(), l1, blk, true, vc, nil)
+ _, err = eval.Eval(context.Background(), l1, blk, true, vc, nil, l1.tracer)
require.NoError(b, err)
err = l1.AddBlock(blk, cert)
require.NoError(b, err)
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index d0324e249..5b7ceb33c 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -23,6 +23,7 @@ import (
"fmt"
"math/rand"
"os"
+ "path/filepath"
"runtime"
"sort"
"testing"
@@ -40,6 +41,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store/trackerdb"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
@@ -1482,23 +1484,24 @@ func benchLedgerCache(b *testing.B, startRound basics.Round) {
}
func triggerTrackerFlush(t *testing.T, l *Ledger, genesisInitState ledgercore.InitState) {
- l.trackers.mu.RLock()
+ l.trackers.mu.Lock()
initialDbRound := l.trackers.dbRound
currentDbRound := initialDbRound
l.trackers.lastFlushTime = time.Time{}
- l.trackers.mu.RUnlock()
+ l.trackers.mu.Unlock()
- addEmptyValidatedBlock(t, l, genesisInitState.Accounts)
-
- const timeout = 2 * time.Second
+ const timeout = 3 * time.Second
started := time.Now()
// We can't truly wait for scheduleCommit to take place, which means that, without sleep-based waiting,
// we might beat scheduleCommit's addition to accountsWriting, making our wait on it continue immediately.
- // The solution is to wait for the advancement of l.trackers.dbRound, which is a side effect of postCommit's success.
+ // The solution is to continue to add blocks and wait for the advancement of l.trackers.dbRound,
+ // which is a side effect of postCommit's success.
for currentDbRound == initialDbRound {
time.Sleep(50 * time.Microsecond)
- require.True(t, time.Now().Sub(started) < timeout)
+ require.True(t, time.Since(started) < timeout)
+ addEmptyValidatedBlock(t, l, genesisInitState.Accounts)
+ l.WaitForCommit(l.Latest())
l.trackers.mu.RLock()
currentDbRound = l.trackers.dbRound
l.trackers.mu.RUnlock()
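
The reworked triggerTrackerFlush polls l.trackers.dbRound, adding a block on each iteration so a commit eventually gets scheduled, and bails out on a deadline. The same poll-until-a-counter-advances shape, extracted into a self-contained sketch:

    package main

    import (
    	"fmt"
    	"time"
    )

    // waitForAdvance polls a monotonically increasing counter until it moves past
    // its initial value or the timeout expires; it mirrors the dbRound wait loop
    // in triggerTrackerFlush above, where each iteration also nudges the system
    // forward (modeled here by the step callback).
    func waitForAdvance(read func() uint64, step func(), timeout time.Duration) error {
    	initial := read()
    	deadline := time.Now().Add(timeout)
    	for read() == initial {
    		if time.Now().After(deadline) {
    			return fmt.Errorf("counter did not advance past %d within %v", initial, timeout)
    		}
    		time.Sleep(50 * time.Microsecond)
    		step() // e.g. add another block so a commit gets scheduled
    	}
    	return nil
    }

    func main() {
    	var round uint64
    	err := waitForAdvance(
    		func() uint64 { return round },
    		func() { round++ }, // stand-in for addEmptyValidatedBlock + WaitForCommit
    		3*time.Second,
    	)
    	fmt.Println(err)
    }
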
@@ -1545,36 +1548,6 @@ func TestLedgerReload(t *testing.T) {
ledgertesting.WithAndWithoutLRUCache(t, cfg, testLedgerReload)
}
-func TestWaitLedgerReload(t *testing.T) {
- partitiontest.PartitionTest(t)
- a := require.New(t)
-
- dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
- const inMem = true
- cfg := config.GetDefaultLocal()
- cfg.MaxAcctLookback = 0
- log := logging.TestingLog(t)
- log.SetLevel(logging.Info)
- l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
- require.NoError(t, err)
- defer l.Close()
-
- waitRound := l.Latest() + 1
- waitChannel := l.Wait(waitRound)
-
- err = l.reloadLedger()
- a.NoError(err)
- triggerTrackerFlush(t, l, genesisInitState)
-
- select {
- case <-waitChannel:
- return
- default:
- a.Failf("", "Wait channel did not receive an expected signal for round %d", waitRound)
- }
-}
-
// TestGetLastCatchpointLabel tests ledger.GetLastCatchpointLabel is returning the correct value.
func TestGetLastCatchpointLabel(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1715,18 +1688,16 @@ func TestListAssetsAndApplications(t *testing.T) {
require.Equal(t, appCount, len(results))
}
-// TestLedgerKeepsOldBlocksForStateProof test that if stateproof chain is delayed for X intervals, the ledger will not
-// remove old blocks from the database. When verifying old stateproof transaction, nodes must have the header of the corresponding
-// voters round, if this won't be available the verification would fail.
-// the voter tracker should prevent the remove needed blocks from the database.
-func TestLedgerKeepsOldBlocksForStateProof(t *testing.T) {
+// TestLedgerVerifiesOldStateProofs tests that if the stateproof chain is delayed for X intervals (past StateProofMaxRecoveryIntervals),
+// the ledger will still be able to verify the state proof - i.e. the ledger has the necessary data to verify it.
+func TestLedgerVerifiesOldStateProofs(t *testing.T) {
partitiontest.PartitionTest(t)
- // since the first state proof is expected to happen on stateproofInterval*2 we would start give-up on state proofs we would
- // give up on old state proofs only after stateproofInterval*3
- maxBlocks := int((config.Consensus[protocol.ConsensusCurrentVersion].StateProofMaxRecoveryIntervals + 2) * config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval)
+ // since the first state proof is expected to happen on stateproofInterval*2, we would start
+ // giving up on state proofs only after stateproofInterval*3
+ maxBlocks := int((config.Consensus[protocol.ConsensusFuture].StateProofMaxRecoveryIntervals + 2) * config.Consensus[protocol.ConsensusFuture].StateProofInterval)
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 10000000000)
+ genesisInitState, initKeys := ledgertesting.GenerateInitState(t, protocol.ConsensusFuture, 10000000000)
// place real values on the participation period, so we would create a commitment with some stake.
accountsWithValid := make(map[basics.Address]basics.AccountData)
@@ -1743,14 +1714,18 @@ func TestLedgerKeepsOldBlocksForStateProof(t *testing.T) {
}
genesisInitState.Accounts = accountsWithValid
- const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = false
log := logging.TestingLog(t)
log.SetLevel(logging.Info)
+ const inMem = false
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
- defer l.Close()
+ defer func() {
+ l.Close()
+ os.Remove(dbName + ".block.sqlite")
+ os.Remove(dbName + ".tracker.sqlite")
+ }()
lastBlock, err := l.Block(l.Latest())
require.NoError(t, err)
@@ -1760,7 +1735,7 @@ func TestLedgerKeepsOldBlocksForStateProof(t *testing.T) {
// regular addresses: all init accounts minus pools
addresses := make([]basics.Address, len(genesisInitState.Accounts)-2, len(genesisInitState.Accounts)+maxBlocks)
- i := 0
+ i := uint64(0)
for addr := range genesisInitState.Accounts {
if addr != testPoolAddr && addr != testSinkAddr {
addresses[i] = addr
@@ -1770,32 +1745,47 @@ func TestLedgerKeepsOldBlocksForStateProof(t *testing.T) {
keys[addr] = initKeys[addr]
}
- for i := 0; i < maxBlocks; i++ {
+ for i = 0; i < uint64(maxBlocks)+proto.StateProofInterval; i++ {
addDummyBlock(t, addresses, proto, l, initKeys, genesisInitState)
}
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
- // On this round there is no give up on any state proof - so we would be able to verify an old state proof txn.
-
- // We now create block with stateproof transaction. since we don't want to complicate the test and create
- // a cryptographically correct stateproof we would make sure that only the crypto part of the verification fails.
+ triggerTrackerFlush(t, l, genesisInitState)
+ l.WaitForCommit(l.Latest())
blk := createBlkWithStateproof(t, maxBlocks, proto, genesisInitState, l, accounts)
_, err = l.Validate(context.Background(), blk, backlogPool)
require.ErrorContains(t, err, "state proof crypto error")
- for i := uint64(0); i < proto.StateProofInterval; i++ {
+ for i = 0; i < proto.StateProofInterval; i++ {
addDummyBlock(t, addresses, proto, l, initKeys, genesisInitState)
}
+ triggerTrackerFlush(t, l, genesisInitState)
+ addDummyBlock(t, addresses, proto, l, initKeys, genesisInitState)
l.WaitForCommit(l.Latest())
- // at the point the ledger would remove the voters round for the database.
- // that will cause the stateproof transaction verification to fail because there are
- // missing blocks
- blk = createBlkWithStateproof(t, maxBlocks, proto, genesisInitState, l, accounts)
- _, err = l.Validate(context.Background(), blk, backlogPool)
+ // At this point the block queue go-routine will start removing blocks. However, it might not complete the task;
+ // for that reason, we wait for the next block to be committed.
+ addDummyBlock(t, addresses, proto, l, initKeys, genesisInitState)
+ l.WaitForCommit(l.Latest())
+
+ // we make sure that the voters header does not exist and that the voters tracker
+ // lost tracking of the top voters.
+ _, err = l.BlockHdr(basics.Round(proto.StateProofInterval))
+ require.Error(t, err)
expectedErr := &ledgercore.ErrNoEntry{}
require.True(t, errors.As(err, expectedErr), fmt.Sprintf("got error %s", err))
+
+ l.acctsOnline.voters.votersMu.Lock()
+ for k := range l.acctsOnline.voters.votersForRoundCache {
+ require.NotEqual(t, k, basics.Round(proto.StateProofInterval-proto.StateProofVotersLookback), "found voters for round 200, it should have been removed")
+ }
+ l.acctsOnline.voters.votersMu.Unlock()
+
+ // However, we are still able to verify a state proof since we use the tracker
+ blk = createBlkWithStateproof(t, maxBlocks, proto, genesisInitState, l, accounts)
+ _, err = l.Validate(context.Background(), blk, backlogPool)
+ require.ErrorContains(t, err, "state proof crypto error")
}
func createBlkWithStateproof(t *testing.T, maxBlocks int, proto config.ConsensusParams, genesisInitState ledgercore.InitState, l *Ledger, accounts map[basics.Address]basics.AccountData) bookkeeping.Block {
@@ -1828,8 +1818,9 @@ func createBlkWithStateproof(t *testing.T, maxBlocks int, proto config.Consensus
}
func addDummyBlock(t *testing.T, addresses []basics.Address, proto config.ConsensusParams, l *Ledger, initKeys map[basics.Address]*crypto.SignatureSecrets, genesisInitState ledgercore.InitState) {
- stxns := make([]transactions.SignedTxn, 2)
- for j := 0; j < 2; j++ {
+ numOfTransactions := 2
+ stxns := make([]transactions.SignedTxn, numOfTransactions)
+ for j := 0; j < numOfTransactions; j++ {
txHeader := transactions.Header{
Sender: addresses[0],
Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
@@ -2214,10 +2205,11 @@ func TestLedgerReloadShrinkDeltas(t *testing.T) {
l.cfg = cfg
l.reloadLedger()
- _, err = l.OnlineTotals(basics.Round(proto.MaxBalLookback - shorterLookback))
+ rnd := basics.Round(proto.MaxBalLookback - shorterLookback)
+ _, err = l.OnlineCirculation(rnd, rnd+basics.Round(proto.MaxBalLookback))
require.Error(t, err)
for i := basics.Round(proto.MaxBalLookback - shorterLookback + 1); i <= l.Latest(); i++ {
- online, err := l.OnlineTotals(i)
+ online, err := l.OnlineCirculation(i, i+basics.Round(proto.MaxBalLookback))
require.NoError(t, err)
require.Equal(t, onlineTotals[i], online)
}
@@ -2640,10 +2632,11 @@ func TestLedgerMigrateV6ShrinkDeltas(t *testing.T) {
l2.Close()
}()
- _, err = l2.OnlineTotals(basics.Round(proto.MaxBalLookback - shorterLookback))
+ rnd := basics.Round(proto.MaxBalLookback - shorterLookback)
+ _, err = l2.OnlineCirculation(rnd, rnd+basics.Round(proto.MaxBalLookback))
require.Error(t, err)
for i := l2.Latest() - basics.Round(proto.MaxBalLookback-1); i <= l2.Latest(); i++ {
- online, err := l2.OnlineTotals(i)
+ online, err := l2.OnlineCirculation(i, i+basics.Round(proto.MaxBalLookback))
require.NoError(t, err)
require.Equal(t, onlineTotals[i], online)
}
@@ -2868,10 +2861,20 @@ func verifyVotersContent(t *testing.T, expected map[basics.Round]*ledgercore.Vot
}
}
+func triggerDeleteVoters(t *testing.T, l *Ledger, genesisInitState ledgercore.InitState) {
+ // We make the ledger flush tracker data to allow votersTracker to advance lowestRound
+ triggerTrackerFlush(t, l, genesisInitState)
+
+ // We add another block to make the block queue query the voters tracker's lowest round again, which allows it to forget
+ // rounds based on the new lowest round.
+ triggerTrackerFlush(t, l, genesisInitState)
+}
+
func testVotersReloadFromDisk(t *testing.T, cfg config.Local) {
+
proto := config.Consensus[protocol.ConsensusCurrentVersion]
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- genesisInitState := getInitState()
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusFuture, 100)
genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
const inMem = true
@@ -2881,24 +2884,15 @@ func testVotersReloadFromDisk(t *testing.T, cfg config.Local) {
require.NoError(t, err)
defer l.Close()
- blk := genesisInitState.Block
- var sp bookkeeping.StateProofTrackingData
- sp.StateProofNextRound = basics.Round(proto.StateProofInterval * 2)
- blk.BlockHeader.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
- protocol.StateProofBasic: sp,
- }
-
// we add blocks to the ledger to test reload from disk. we would like the history of the acctonline to extend.
// but we don't want to go behind stateproof recovery interval
for i := uint64(0); i < (proto.StateProofInterval*(proto.StateProofMaxRecoveryIntervals-2) - proto.StateProofVotersLookback); i++ {
- blk.BlockHeader.Round++
- blk.BlockHeader.TimeStamp += 10
- err = l.AddBlock(blk, agreement.Certificate{})
- require.NoError(t, err)
+ addEmptyValidatedBlock(t, l, genesisInitState.Accounts)
}
// at this point the database should contain the voter for round 256 but the voters for round 512 should be in deltas
- l.WaitForCommit(blk.BlockHeader.Round)
+ l.WaitForCommit(l.Latest())
+ triggerTrackerFlush(t, l, genesisInitState)
vtSnapshot := l.acctsOnline.voters.votersForRoundCache
// ensuring no tree was evicted.
@@ -2928,7 +2922,7 @@ func testVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T, cfg confi
proto := config.Consensus[protocol.ConsensusCurrentVersion]
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- genesisInitState := getInitState()
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
const inMem = true
@@ -2950,7 +2944,6 @@ func testVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T, cfg confi
for i := uint64(0); i < (proto.StateProofInterval*3 - proto.StateProofVotersLookback); i++ {
blk.BlockHeader.Round++
- blk.BlockHeader.TimeStamp += 10
err = l.AddBlock(blk, agreement.Certificate{})
require.NoError(t, err)
}
@@ -2963,12 +2956,11 @@ func testVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T, cfg confi
for i := uint64(0); i < proto.StateProofInterval; i++ {
blk.BlockHeader.Round++
- blk.BlockHeader.TimeStamp += 10
err = l.AddBlock(blk, agreement.Certificate{})
require.NoError(t, err)
}
- l.WaitForCommit(blk.BlockHeader.Round)
+ triggerDeleteVoters(t, l, genesisInitState)
vtSnapshot := l.acctsOnline.voters.votersForRoundCache
// verifying that the tree for round 512 is still in the cache, but the tree for round 256 is evicted.
@@ -2996,7 +2988,7 @@ func testVotersReloadFromDiskPassRecoveryPeriod(t *testing.T, cfg config.Local)
proto := config.Consensus[protocol.ConsensusCurrentVersion]
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- genesisInitState := getInitState()
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
const inMem = true
@@ -3016,14 +3008,13 @@ func testVotersReloadFromDiskPassRecoveryPeriod(t *testing.T, cfg config.Local)
// we push proto.StateProofInterval * (proto.StateProofMaxRecoveryIntervals + 2) block into the ledger
// the reason for + 2 is the first state proof is on 2*stateproofinterval.
for i := uint64(0); i < (proto.StateProofInterval * (proto.StateProofMaxRecoveryIntervals + 2)); i++ {
- blk.BlockHeader.Round++
- blk.BlockHeader.TimeStamp += 10
- err = l.AddBlock(blk, agreement.Certificate{})
- require.NoError(t, err)
+ addEmptyValidatedBlock(t, l, genesisInitState.Accounts)
}
// the voters tracker should contain all the voters for each stateproof round. nothing should be removed
- l.WaitForCommit(blk.BlockHeader.Round)
+ l.WaitForCommit(l.Latest())
+ triggerTrackerFlush(t, l, genesisInitState)
+
vtSnapshot := l.acctsOnline.voters.votersForRoundCache
beforeRemoveVotersLen := len(vtSnapshot)
err = l.reloadLedger()
@@ -3033,14 +3024,15 @@ func testVotersReloadFromDiskPassRecoveryPeriod(t *testing.T, cfg config.Local)
verifyVotersContent(t, vtSnapshot, l.acctsOnline.voters.votersForRoundCache)
for i := uint64(0); i < proto.StateProofInterval; i++ {
- blk.BlockHeader.Round++
- blk.BlockHeader.TimeStamp += 10
- err = l.AddBlock(blk, agreement.Certificate{})
- require.NoError(t, err)
+ addEmptyValidatedBlock(t, l, genesisInitState.Accounts)
}
- // the voters tracker should give up on voters for round 512
- l.WaitForCommit(blk.BlockHeader.Round)
+ triggerDeleteVoters(t, l, genesisInitState)
+
+ // round 512 should now be forgotten.
+ _, found = l.acctsOnline.voters.votersForRoundCache[basics.Round(proto.StateProofInterval-proto.StateProofVotersLookback)]
+ require.False(t, found)
+
vtSnapshot = l.acctsOnline.voters.votersForRoundCache
err = l.reloadLedger()
require.NoError(t, err)
@@ -3054,11 +3046,321 @@ func testVotersReloadFromDiskPassRecoveryPeriod(t *testing.T, cfg config.Local)
func TestVotersReloadFromDiskPassRecoveryPeriod(t *testing.T) {
partitiontest.PartitionTest(t)
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = false
+ cfg.MaxAcctLookback = 0
+
+ ledgertesting.WithAndWithoutLRUCache(t, cfg, testVotersReloadFromDiskPassRecoveryPeriod)
+}
+
+type mockCommitListener struct{}
+
+func (l *mockCommitListener) OnPrepareVoterCommit(oldBase basics.Round, newBase basics.Round, _ ledgercore.LedgerForSPBuilder) {
+}
+
+func TestVotersCallbackPersistsAfterLedgerReload(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ commitListener := mockCommitListener{}
+ l.RegisterVotersCommitListener(&commitListener)
+ listenerBeforeReload := l.acctsOnline.voters.commitListener
+
+ require.NotNil(t, listenerBeforeReload)
+ err = l.reloadLedger()
+ require.NoError(t, err)
+
+ listenerAfterReload := l.acctsOnline.voters.commitListener
+ require.Equal(t, listenerBeforeReload, listenerAfterReload)
+}
+
+type errorCommitListener struct{}
+
+func (l *errorCommitListener) OnPrepareVoterCommit(oldBase basics.Round, newBase basics.Round, _ ledgercore.LedgerForSPBuilder) {
+}
+
+func TestLedgerContinuesOnVotersCallbackFailure(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.MaxAcctLookback = 0
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ commitListener := errorCommitListener{}
+ l.RegisterVotersCommitListener(&commitListener)
+
+ previousCachedDbRound := l.trackers.dbRound
+ triggerTrackerFlush(t, l, genesisInitState)
+ l.trackers.mu.Lock()
+ newDbRound := l.trackers.dbRound
+ l.trackers.mu.Unlock()
+ require.Equal(t, previousCachedDbRound+1, newDbRound)
+}
+
+func TestLedgerSPVerificationTracker(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = false
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ numOfStateProofs := uint64(3)
+ firstStateProofContextConfirmedRound := proto.StateProofInterval
+ firstStateProofContextTargetRound := firstStateProofContextConfirmedRound + proto.StateProofInterval
+
+ lastStateProofContextConfirmedRound := firstStateProofContextConfirmedRound + proto.StateProofInterval*(numOfStateProofs-1)
+ lastStateProofContextTargetRound := lastStateProofContextConfirmedRound + proto.StateProofInterval
+
+ for i := uint64(0); i < firstStateProofContextConfirmedRound-1; i++ {
+ addEmptyValidatedBlock(t, l, genesisInitState.Accounts)
+ }
+
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound),
+ 1, proto.StateProofInterval, false, any)
+
+ addEmptyValidatedBlock(t, l, genesisInitState.Accounts)
+
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound),
+ 1, proto.StateProofInterval, true, trackerMemory)
+
+ for i := firstStateProofContextConfirmedRound; i < lastStateProofContextConfirmedRound; i++ {
+ addEmptyValidatedBlock(t, l, genesisInitState.Accounts)
+ }
+
+ l.WaitForCommit(l.Latest())
+ triggerTrackerFlush(t, l, genesisInitState)
+
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound),
+ numOfStateProofs-1, proto.StateProofInterval, true, trackerDB)
+ // Last one should be in memory as a result of cfg.MaxAcctLookback not being equal to 0.
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(lastStateProofContextTargetRound),
+ 1, proto.StateProofInterval, true, trackerMemory)
+
+ l.WaitForCommit(l.Latest())
+ triggerTrackerFlush(t, l, genesisInitState)
+
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound),
+ numOfStateProofs, proto.StateProofInterval, true, any)
+
+ blk := makeNewEmptyBlock(t, l, t.Name(), genesisInitState.Accounts)
+ var stateProofReceived bookkeeping.StateProofTrackingData
+ stateProofReceived.StateProofNextRound = basics.Round(firstStateProofContextTargetRound + proto.StateProofInterval)
+ blk.BlockHeader.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
+ protocol.StateProofBasic: stateProofReceived,
+ }
+
+ // This implementation is an easy way to feed the delta, which the state proof verification tracker relies on,
+ // to the ledger.
+ delta, err := eval.Eval(context.Background(), l, blk, false, l.verifiedTxnCache, nil, l.tracer)
+ require.NoError(t, err)
+ delta.StateProofNext = stateProofReceived.StateProofNextRound
+ vb := ledgercore.MakeValidatedBlock(blk, delta)
+ err = l.AddValidatedBlock(vb, agreement.Certificate{})
+ require.NoError(t, err)
+
+ for i := uint64(0); i < proto.MaxBalLookback; i++ {
+ addEmptyValidatedBlock(t, l, genesisInitState.Accounts)
+ }
+
+ l.WaitForCommit(blk.BlockHeader.Round)
+ triggerTrackerFlush(t, l, genesisInitState)
+
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound),
+ 1, proto.StateProofInterval, false, any)
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound+proto.StateProofInterval),
+ numOfStateProofs-1, proto.StateProofInterval, true, any)
+}
+
+func TestLedgerReloadStateProofVerificationTracker(t *testing.T) {
+ partitiontest.PartitionTest(t)
proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = false
- cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
- ledgertesting.WithAndWithoutLRUCache(t, cfg, testVotersReloadFromDiskPassRecoveryPeriod)
+ numOfStateProofs := uint64(3)
+ firstStateProofContextConfirmedRound := proto.StateProofInterval
+ firstStateProofContextTargetRound := firstStateProofContextConfirmedRound + proto.StateProofInterval
+
+ lastStateProofContextConfirmedRound := firstStateProofContextConfirmedRound + proto.StateProofInterval*(numOfStateProofs-1)
+ lastStateProofContextTargetRound := lastStateProofContextConfirmedRound + proto.StateProofInterval
+
+ for i := uint64(0); i < lastStateProofContextConfirmedRound; i++ {
+ addEmptyValidatedBlock(t, l, genesisInitState.Accounts)
+ }
+
+ triggerTrackerFlush(t, l, genesisInitState)
+
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound),
+ numOfStateProofs-1, proto.StateProofInterval, true, trackerDB)
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(lastStateProofContextTargetRound),
+ 1, proto.StateProofInterval, true, trackerMemory)
+
+ err = l.reloadLedger()
+ require.NoError(t, err)
+
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofContextTargetRound),
+ numOfStateProofs-1, proto.StateProofInterval, true, trackerDB)
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(lastStateProofContextTargetRound),
+ 1, proto.StateProofInterval, true, trackerMemory)
+}
+
+func feedBlocksUntilRound(t *testing.T, l *Ledger, prevBlk bookkeeping.Block, targetRound basics.Round) bookkeeping.Block {
+ for prevBlk.Round() < targetRound {
+ prevBlk.BlockHeader.Round++
+ err := l.AddBlock(prevBlk, agreement.Certificate{})
+ require.NoError(t, err)
+ }
+
+ return prevBlk
+}
+
+func TestLedgerCatchpointSPVerificationTracker(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ proto := config.Consensus[protocol.ConsensusFuture]
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState, initkeys := ledgertesting.GenerateInitState(t, protocol.ConsensusFuture, 100)
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusFuture
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ // This assures us that the first catchpoint file will contain data for exactly one state proof.
+ cfg.CatchpointInterval = proto.StateProofInterval + proto.MaxBalLookback
+ cfg.MaxAcctLookback = 4
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+
+ firstStateProofDataConfirmedRound := proto.StateProofInterval
+ firstStateProofDataTargetRound := firstStateProofDataConfirmedRound + proto.StateProofInterval
+
+ blk := genesisInitState.Block
+ var sp bookkeeping.StateProofTrackingData
+ sp.StateProofNextRound = basics.Round(firstStateProofDataTargetRound)
+ blk.BlockHeader.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
+ protocol.StateProofBasic: sp,
+ }
+
+ // Feeding blocks until we can know for sure we have at least one catchpoint written.
+ blk = feedBlocksUntilRound(t, l, blk, basics.Round(cfg.CatchpointInterval*2))
+ l.WaitForCommit(basics.Round(cfg.CatchpointInterval * 2))
+ triggerTrackerFlush(t, l, genesisInitState)
+
+ numTrackedDataFirstCatchpoint := (cfg.CatchpointInterval - proto.MaxBalLookback) / proto.StateProofInterval
+
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofDataTargetRound),
+ numTrackedDataFirstCatchpoint, proto.StateProofInterval, true, any)
+ l.Close()
+
+ l, err = OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ require.NoError(t, err)
+ defer l.Close()
+
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofDataTargetRound),
+ numTrackedDataFirstCatchpoint, proto.StateProofInterval, false, any)
+
+ catchpointAccessor, accessorProgress := initializeTestCatchupAccessor(t, l, uint64(len(initkeys)))
+
+ relCatchpointFilePath := filepath.Join(trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(basics.Round(cfg.CatchpointInterval)))
+ catchpointData := readCatchpointFile(t, relCatchpointFilePath)
+
+ err = catchpointAccessor.ProcessStagingBalances(context.Background(), catchpointData[1].headerName, catchpointData[1].data, &accessorProgress)
+ require.NoError(t, err)
+ err = catchpointAccessor.CompleteCatchup(context.Background())
+ require.NoError(t, err)
+
+ verifyStateProofVerificationTracking(t, &l.spVerification, basics.Round(firstStateProofDataTargetRound),
+ numTrackedDataFirstCatchpoint, proto.StateProofInterval, true, any)
+}
+
+func TestLedgerSPTrackerAfterReplay(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ const inMem = true
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
+ a.NoError(err)
+ defer l.Close()
+
+ // Add 1024 empty blocks without advancing NextStateProofRound
+ firstStateProofRound := basics.Round(proto.StateProofInterval * 2) // 512
+ blk := genesisInitState.Block
+ var sp bookkeeping.StateProofTrackingData
+ sp.StateProofNextRound = firstStateProofRound // 512
+ blk.BlockHeader.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
+ protocol.StateProofBasic: sp,
+ }
+
+ for i := uint64(0); i < proto.StateProofInterval*4; i++ {
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += 10
+ err = l.AddBlock(blk, agreement.Certificate{})
+ a.NoError(err)
+ }
+
+ // 1024
+ verifyStateProofVerificationTracking(t, &l.spVerification, firstStateProofRound, 1, proto.StateProofInterval, true, any)
+ a.Equal(0, len(l.spVerification.pendingDeleteContexts))
+
+ // Add StateProof transaction (for round 512) and apply without validating, advancing the NextStateProofRound to 768
+ spblk := createBlkWithStateproof(t, int(blk.BlockHeader.Round), proto, genesisInitState, l, genesisInitState.Accounts)
+ err = l.AddBlock(spblk, agreement.Certificate{})
+ a.NoError(err)
+ a.Equal(1, len(l.spVerification.pendingDeleteContexts))
+ // To be deleted, but not yet deleted (waiting for commit)
+ verifyStateProofVerificationTracking(t, &l.spVerification, firstStateProofRound, 1, proto.StateProofInterval, true, any)
+
+ triggerTrackerFlush(t, l, genesisInitState)
+
+ err = l.reloadLedger()
+ a.NoError(err)
+
+ a.Equal(1, len(l.spVerification.pendingDeleteContexts))
+ verifyStateProofVerificationTracking(t, &l.spVerification, firstStateProofRound, 1, proto.StateProofInterval, true, any)
}
diff --git a/ledger/ledgercore/catchpointlabel.go b/ledger/ledgercore/catchpointlabel.go
index 6f0b90f9d..6ed5cfd27 100644
--- a/ledger/ledgercore/catchpointlabel.go
+++ b/ledger/ledgercore/catchpointlabel.go
@@ -34,44 +34,93 @@ var base32Encoder = base32.StdEncoding.WithPadding(base32.NoPadding)
// ErrCatchpointParsingFailed is used when we attempt to parse a catchpoint label and fail doing so.
var ErrCatchpointParsingFailed = errors.New("catchpoint parsing failed")
-// CatchpointLabel represent a single catchpoint label. It will "assemble" a label based on the components
-type CatchpointLabel struct {
+// CatchpointLabelMaker is used to abstract the creation of different catchpoint versions.
+// Different catchpoint versions might hash different fields.
+type CatchpointLabelMaker interface {
+ // buffer returns an image used for hashing (a concatenation of all the fields in the label).
+ buffer() []byte
+ // round returns the catchpoint label round
+ round() basics.Round
+ // message returns a printable string containing all the relevant fields in the label.
+ message() string
+}
+
+// CatchpointLabelMakerV6 represents a single catchpoint label maker, matching catchpoints of version V6 and below.
+type CatchpointLabelMakerV6 struct {
ledgerRound basics.Round
ledgerRoundBlockHash crypto.Digest
balancesMerkleRoot crypto.Digest
totals AccountTotals
}
-// MakeCatchpointLabel creates a catchpoint label given the catchpoint label parameters.
-func MakeCatchpointLabel(ledgerRound basics.Round, ledgerRoundBlockHash crypto.Digest, balancesMerkleRoot crypto.Digest, totals AccountTotals) CatchpointLabel {
- return CatchpointLabel{
+// MakeCatchpointLabelMakerV6 creates a V6 catchpoint label given the catchpoint label parameters.
+func MakeCatchpointLabelMakerV6(ledgerRound basics.Round, ledgerRoundBlockHash *crypto.Digest,
+ balancesMerkleRoot *crypto.Digest, totals AccountTotals) *CatchpointLabelMakerV6 {
+ return &CatchpointLabelMakerV6{
ledgerRound: ledgerRound,
- ledgerRoundBlockHash: ledgerRoundBlockHash,
- balancesMerkleRoot: balancesMerkleRoot,
+ ledgerRoundBlockHash: *ledgerRoundBlockHash,
+ balancesMerkleRoot: *balancesMerkleRoot,
totals: totals,
}
}
-// String return the user-facing representation of this catchpoint label. ( i.e. the "label" )
-func (l CatchpointLabel) String() string {
- hash := l.Hash()
- encodedHash := base32Encoder.EncodeToString(hash[:])
- out := fmt.Sprintf("%d#%s", l.ledgerRound, encodedHash)
- logging.Base().Infof("Creating a catchpoint label %s for round=%d, block digest=%s, accounts digest=%s", out, l.ledgerRound, l.ledgerRoundBlockHash, l.balancesMerkleRoot)
- return out
-}
-
-// Hash return the hash portion of this catchpoint label
-func (l CatchpointLabel) Hash() crypto.Digest {
+func (l *CatchpointLabelMakerV6) buffer() []byte {
encodedTotals := protocol.EncodeReflect(&l.totals)
buffer := make([]byte, 2*crypto.DigestSize+len(encodedTotals))
copy(buffer[:], l.ledgerRoundBlockHash[:])
copy(buffer[crypto.DigestSize:], l.balancesMerkleRoot[:])
copy(buffer[crypto.DigestSize*2:], encodedTotals)
- return crypto.Hash(buffer[:crypto.DigestSize*2+len(encodedTotals)])
+
+ return buffer
+}
+
+func (l *CatchpointLabelMakerV6) round() basics.Round {
+ return l.ledgerRound
+}
+
+func (l *CatchpointLabelMakerV6) message() string {
+ return fmt.Sprintf("round=%d, block digest=%s, accounts digest=%s", l.ledgerRound, l.ledgerRoundBlockHash, l.balancesMerkleRoot)
+}
+
+// CatchpointLabelMakerCurrent represents a single catchpoint label maker, matching catchpoints of version V7 and above.
+type CatchpointLabelMakerCurrent struct {
+ v6Label CatchpointLabelMakerV6
+ spVerificationHash crypto.Digest
+}
+
+// MakeCatchpointLabelMakerCurrent creates a catchpoint label given the catchpoint label parameters.
+func MakeCatchpointLabelMakerCurrent(ledgerRound basics.Round, ledgerRoundBlockHash *crypto.Digest,
+ balancesMerkleRoot *crypto.Digest, totals AccountTotals, spVerificationContextHash *crypto.Digest) *CatchpointLabelMakerCurrent {
+ return &CatchpointLabelMakerCurrent{
+ v6Label: *MakeCatchpointLabelMakerV6(ledgerRound, ledgerRoundBlockHash, balancesMerkleRoot, totals),
+ spVerificationHash: *spVerificationContextHash,
+ }
+}
+
+func (l *CatchpointLabelMakerCurrent) buffer() []byte {
+ v6Buffer := l.v6Label.buffer()
+
+ return append(v6Buffer, l.spVerificationHash[:]...)
+}
+
+func (l *CatchpointLabelMakerCurrent) round() basics.Round {
+ return l.v6Label.round()
+}
+
+func (l *CatchpointLabelMakerCurrent) message() string {
+ return fmt.Sprintf("%s spver digest=%s", l.v6Label.message(), l.spVerificationHash)
+}
+
+// MakeLabel returns the user-facing representation of this catchpoint label. ( i.e. the "label" )
+func MakeLabel(l CatchpointLabelMaker) string {
+ hash := crypto.Hash(l.buffer())
+ encodedHash := base32Encoder.EncodeToString(hash[:])
+ out := fmt.Sprintf("%d#%s", l.round(), encodedHash)
+ logging.Base().Infof("Creating a catchpoint label %s for %s", out, l.message())
+ return out
}
-// ParseCatchpointLabel parse the given label and breaks it into the round and hash components. In case of a parsing failuire,
+// ParseCatchpointLabel parses the given label and breaks it into the round and hash components. In case of a parsing failure,
// the returned err is non-nil.
func ParseCatchpointLabel(label string) (round basics.Round, hash crypto.Digest, err error) {
err = ErrCatchpointParsingFailed
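
The label format itself is unchanged: hash the maker's buffer, base32-encode the digest without padding, and render round#HASH, which ParseCatchpointLabel later splits apart. A runnable sketch of that round trip, assuming crypto.Hash is SHA-512/256 (consistent with the 32-byte crypto.DigestSize used in buffer() above):

    package main

    import (
    	"crypto/sha512"
    	"encoding/base32"
    	"fmt"
    	"strings"
    )

    var base32Encoder = base32.StdEncoding.WithPadding(base32.NoPadding)

    // makeLabel mirrors MakeLabel above: hash the maker's buffer (assumed here
    // to be SHA-512/256, matching crypto.Hash) and render "round#BASE32HASH".
    func makeLabel(round uint64, buffer []byte) string {
    	hash := sha512.Sum512_256(buffer)
    	return fmt.Sprintf("%d#%s", round, base32Encoder.EncodeToString(hash[:]))
    }

    // parseLabel sketches ParseCatchpointLabel's round/hash split.
    func parseLabel(label string) (round uint64, hash []byte, err error) {
    	parts := strings.SplitN(label, "#", 2)
    	if len(parts) != 2 {
    		return 0, nil, fmt.Errorf("catchpoint parsing failed")
    	}
    	if _, err := fmt.Sscanf(parts[0], "%d", &round); err != nil {
    		return 0, nil, err
    	}
    	hash, err = base32Encoder.DecodeString(parts[1])
    	return round, hash, err
    }

    func main() {
    	label := makeLabel(512, []byte("blockhash|balancesroot|totals|spver"))
    	fmt.Println(label)
    	fmt.Println(parseLabel(label))
    }

The V7 maker only changes what goes into the buffer (appending the state proof verification hash), so old and new labels share this exact hashing and parsing path.
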
diff --git a/ledger/ledgercore/catchpointlabel_test.go b/ledger/ledgercore/catchpointlabel_test.go
index 1bfbe3115..d2b0ad41f 100644
--- a/ledger/ledgercore/catchpointlabel_test.go
+++ b/ledger/ledgercore/catchpointlabel_test.go
@@ -32,10 +32,12 @@ func TestUniqueCatchpointLabel(t *testing.T) {
uniqueSet := make(map[string]bool)
ledgerRoundBlockHashes := []crypto.Digest{}
+ stateProofVerificationContextHashes := []crypto.Digest{}
balancesMerkleRoots := []crypto.Digest{}
totals := []AccountTotals{}
for i := 0; i < 10; i++ {
ledgerRoundBlockHashes = append(ledgerRoundBlockHashes, crypto.Hash([]byte{byte(i)}))
+ stateProofVerificationContextHashes = append(stateProofVerificationContextHashes, crypto.Hash([]byte{byte(i), byte(1)}))
balancesMerkleRoots = append(balancesMerkleRoots, crypto.Hash([]byte{byte(i), byte(i), byte(1)}))
totals = append(totals,
AccountTotals{
@@ -47,10 +49,13 @@ func TestUniqueCatchpointLabel(t *testing.T) {
for r := basics.Round(0); r <= basics.Round(100); r += basics.Round(7) {
for _, ledgerRoundHash := range ledgerRoundBlockHashes {
for _, balancesMerkleRoot := range balancesMerkleRoots {
- for _, total := range totals {
- label := MakeCatchpointLabel(r, ledgerRoundHash, balancesMerkleRoot, total)
- require.False(t, uniqueSet[label.String()])
- uniqueSet[label.String()] = true
+ for _, stateProofVerificationContextHash := range stateProofVerificationContextHashes {
+ for _, total := range totals {
+ labelMaker := MakeCatchpointLabelMakerCurrent(r, &ledgerRoundHash, &balancesMerkleRoot, total, &stateProofVerificationContextHash)
+ labelString := MakeLabel(labelMaker)
+ require.False(t, uniqueSet[labelString])
+ uniqueSet[labelString] = true
+ }
}
}
}
@@ -61,10 +66,12 @@ func TestCatchpointLabelParsing(t *testing.T) {
partitiontest.PartitionTest(t)
ledgerRoundBlockHashes := []crypto.Digest{}
+ stateProofVerificationContextHashes := []crypto.Digest{}
balancesMerkleRoots := []crypto.Digest{}
totals := []AccountTotals{}
for i := 0; i < 10; i++ {
ledgerRoundBlockHashes = append(ledgerRoundBlockHashes, crypto.Hash([]byte{byte(i)}))
+ stateProofVerificationContextHashes = append(stateProofVerificationContextHashes, crypto.Hash([]byte{byte(i), byte(1)}))
balancesMerkleRoots = append(balancesMerkleRoots, crypto.Hash([]byte{byte(i), byte(i), byte(1)}))
totals = append(totals,
AccountTotals{
@@ -76,12 +83,15 @@ func TestCatchpointLabelParsing(t *testing.T) {
for r := basics.Round(0); r <= basics.Round(100); r += basics.Round(7) {
for _, ledgerRoundHash := range ledgerRoundBlockHashes {
for _, balancesMerkleRoot := range balancesMerkleRoots {
- for _, total := range totals {
- label := MakeCatchpointLabel(r, ledgerRoundHash, balancesMerkleRoot, total)
- parsedRound, parsedHash, err := ParseCatchpointLabel(label.String())
- require.Equal(t, r, parsedRound)
- require.NotEqual(t, crypto.Digest{}, parsedHash)
- require.NoError(t, err)
+ for _, stateProofVerificationContextHash := range stateProofVerificationContextHashes {
+ for _, total := range totals {
+ labelMaker := MakeCatchpointLabelMakerCurrent(r, &ledgerRoundHash, &balancesMerkleRoot, total, &stateProofVerificationContextHash)
+ labelString := MakeLabel(labelMaker)
+ parsedRound, parsedHash, err := ParseCatchpointLabel(labelString)
+ require.Equal(t, r, parsedRound)
+ require.NotEqual(t, crypto.Digest{}, parsedHash)
+ require.NoError(t, err)
+ }
}
}
}
diff --git a/ledger/ledgercore/error.go b/ledger/ledgercore/error.go
index b1dff71a4..c98312a9a 100644
--- a/ledger/ledgercore/error.go
+++ b/ledger/ledgercore/error.go
@@ -93,21 +93,6 @@ func (err ErrNoEntry) Error() string {
return fmt.Sprintf("ledger does not have entry %d (latest %d, committed %d)", err.Round, err.Latest, err.Committed)
}
-// LogicEvalError indicates TEAL evaluation failure
-type LogicEvalError struct {
- Err error
- Details string
-}
-
-// Error satisfies builtin interface `error`
-func (err LogicEvalError) Error() string {
- msg := fmt.Sprintf("logic eval error: %v", err.Err)
- if len(err.Details) > 0 {
- msg = fmt.Sprintf("%s. Details: %s", msg, err.Details)
- }
- return msg
-}
-
// ErrNonSequentialBlockEval provides feedback when the evaluator cannot be created for
// stale/future rounds.
type ErrNonSequentialBlockEval struct {
diff --git a/ledger/ledgercore/msgp_gen.go b/ledger/ledgercore/msgp_gen.go
index 0d6447a80..709658f43 100644
--- a/ledger/ledgercore/msgp_gen.go
+++ b/ledger/ledgercore/msgp_gen.go
@@ -31,6 +31,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// StateProofVerificationContext
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// MarshalMsg implements msgp.Marshaler
func (z *AccountTotals) MarshalMsg(b []byte) (o []byte) {
@@ -937,3 +945,178 @@ func (z *OnlineRoundParamsData) Msgsize() (s int) {
func (z *OnlineRoundParamsData) MsgIsZero() bool {
return ((*z).OnlineSupply == 0) && ((*z).RewardsLevel == 0) && ((*z).CurrentProtocol.MsgIsZero())
}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *StateProofVerificationContext) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(4)
+ var zb0001Mask uint8 /* 5 bits */
+ if (*z).OnlineTotalWeight.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).LastAttestedRound.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).Version.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ if (*z).VotersCommitment.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x10
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "pw"
+ o = append(o, 0xa2, 0x70, 0x77)
+ o = (*z).OnlineTotalWeight.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "spround"
+ o = append(o, 0xa7, 0x73, 0x70, 0x72, 0x6f, 0x75, 0x6e, 0x64)
+ o = (*z).LastAttestedRound.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "v"
+ o = append(o, 0xa1, 0x76)
+ o = (*z).Version.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x10) == 0 { // if not empty
+ // string "vc"
+ o = append(o, 0xa2, 0x76, 0x63)
+ o = (*z).VotersCommitment.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *StateProofVerificationContext) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*StateProofVerificationContext)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *StateProofVerificationContext) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).LastAttestedRound.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "LastAttestedRound")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).VotersCommitment.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VotersCommitment")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).OnlineTotalWeight.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "OnlineTotalWeight")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Version.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Version")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = StateProofVerificationContext{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "spround":
+ bts, err = (*z).LastAttestedRound.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "LastAttestedRound")
+ return
+ }
+ case "vc":
+ bts, err = (*z).VotersCommitment.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VotersCommitment")
+ return
+ }
+ case "pw":
+ bts, err = (*z).OnlineTotalWeight.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "OnlineTotalWeight")
+ return
+ }
+ case "v":
+ bts, err = (*z).Version.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Version")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *StateProofVerificationContext) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*StateProofVerificationContext)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *StateProofVerificationContext) Msgsize() (s int) {
+ s = 1 + 8 + (*z).LastAttestedRound.Msgsize() + 3 + (*z).VotersCommitment.Msgsize() + 3 + (*z).OnlineTotalWeight.Msgsize() + 2 + (*z).Version.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *StateProofVerificationContext) MsgIsZero() bool {
+ return ((*z).LastAttestedRound.MsgIsZero()) && ((*z).VotersCommitment.MsgIsZero()) && ((*z).OnlineTotalWeight.MsgIsZero()) && ((*z).Version.MsgIsZero())
+ return ((*z).LastAttestedRound.MsgIsZero()) && ((*z).VotersCommitment.MsgIsZero()) && ((*z).OnlineTotalWeight.MsgIsZero()) && ((*z).Version.MsgIsZero())
+}
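
The generated marshaler follows msgp's usual omitempty scheme: count the zero-valued fields, shrink the map length accordingly, emit a fixmap header (0x80 | length), and append only the surviving key/value pairs. A minimal, hand-rolled illustration of that byte layout, simplified to uint64 values and short keys:

    package main

    import "fmt"

    // appendOmitEmpty mirrors the generated MarshalMsg pattern above: count the
    // non-zero fields, emit a msgpack fixmap header (0x80 | length), then append
    // only the non-zero key/value pairs.
    func appendOmitEmpty(o []byte, fields map[string]uint64) []byte {
    	n := 0
    	for _, v := range fields {
    		if v != 0 {
    			n++
    		}
    	}
    	o = append(o, 0x80|uint8(n)) // fixmap header, valid for up to 15 fields
    	for k, v := range fields {
    		if v == 0 {
    			continue // omitempty: zero values are not encoded
    		}
    		o = append(o, 0xa0|uint8(len(k))) // fixstr header, keys up to 31 bytes
    		o = append(o, k...)
    		o = append(o, 0xcf) // msgpack uint64 marker
    		for shift := 56; shift >= 0; shift -= 8 {
    			o = append(o, byte(v>>uint(shift)))
    		}
    	}
    	return o
    }

    func main() {
    	// "v" is zero and is omitted; only "pw" survives in the output.
    	fmt.Printf("% x\n", appendOmitEmpty(nil, map[string]uint64{"pw": 42, "v": 0}))
    }
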
diff --git a/ledger/ledgercore/msgp_gen_test.go b/ledger/ledgercore/msgp_gen_test.go
index 1f3c3a3d0..8478b9d8b 100644
--- a/ledger/ledgercore/msgp_gen_test.go
+++ b/ledger/ledgercore/msgp_gen_test.go
@@ -193,3 +193,63 @@ func BenchmarkUnmarshalOnlineRoundParamsData(b *testing.B) {
}
}
}
+
+func TestMarshalUnmarshalStateProofVerificationContext(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := StateProofVerificationContext{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingStateProofVerificationContext(t *testing.T) {
+ protocol.RunEncodingTest(t, &StateProofVerificationContext{})
+}
+
+func BenchmarkMarshalMsgStateProofVerificationContext(b *testing.B) {
+ v := StateProofVerificationContext{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgStateProofVerificationContext(b *testing.B) {
+ v := StateProofVerificationContext{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalStateProofVerificationContext(b *testing.B) {
+ v := StateProofVerificationContext{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go
index 7672f0a86..7e9e25e0d 100644
--- a/ledger/ledgercore/statedelta.go
+++ b/ledger/ledgercore/statedelta.go
@@ -110,8 +110,9 @@ type StateDelta struct {
// new block header; read-only
Hdr *bookkeeping.BlockHeader
- // next round for which we expect a state proof.
- // zero if no state proof is expected.
+ // StateProofNext represents modification on StateProofNextRound field in the block header. If the block contains
+ // a valid state proof transaction, this field will contain the next round for state proof.
+ // otherwise it will be set to 0.
StateProofNext basics.Round
// previous block timestamp
@@ -218,6 +219,32 @@ func (sd *StateDelta) PopulateStateDelta(hdr *bookkeeping.BlockHeader, prevTimes
sd.PrevTimestamp = prevTimestamp
}
+// Hydrate reverses the effects of Dehydrate, restoring internal data.
+func (sd *StateDelta) Hydrate() {
+ sd.Accts.Hydrate()
+}
+
+// Dehydrate normalizes the fields of this StateDelta, and clears any redundant internal caching.
+// This is useful for comparing StateDelta objects for equality.
+//
+// NOTE: initialHint is lost in dehydration. All other fields can be restored by calling Hydrate()
+func (sd *StateDelta) Dehydrate() {
+ sd.Accts.Dehydrate()
+ sd.initialHint = 0
+ if sd.KvMods == nil {
+ sd.KvMods = make(map[string]KvValueDelta)
+ }
+ if sd.Txids == nil {
+ sd.Txids = make(map[transactions.Txid]IncludedTransactions)
+ }
+ if sd.Txleases == nil {
+ sd.Txleases = make(map[Txlease]basics.Round)
+ }
+ if sd.Creatables == nil {
+ sd.Creatables = make(map[basics.CreatableIndex]ModifiedCreatable)
+ }
+}
+
// MakeAccountDeltas creates account delta
// if adding new fields make sure to add them to the .reset() and .isEmpty() methods
func MakeAccountDeltas(hint int) AccountDeltas {
@@ -227,6 +254,62 @@ func MakeAccountDeltas(hint int) AccountDeltas {
}
}
+// Hydrate reverses the effects of Dehydrate, restoring internal data.
+func (ad *AccountDeltas) Hydrate() {
+ if ad.acctsCache == nil {
+ ad.acctsCache = make(map[basics.Address]int, len(ad.Accts))
+ }
+ for idx, acct := range ad.Accts {
+ ad.acctsCache[acct.Addr] = idx
+ }
+
+ if ad.appResourcesCache == nil {
+ ad.appResourcesCache = make(map[AccountApp]int, len(ad.AppResources))
+ }
+ for idx, app := range ad.AppResources {
+ ad.appResourcesCache[AccountApp{app.Addr, app.Aidx}] = idx
+ }
+
+ if ad.assetResourcesCache == nil {
+ ad.assetResourcesCache = make(map[AccountAsset]int, len(ad.AssetResources))
+ }
+ for idx, asset := range ad.AssetResources {
+ ad.assetResourcesCache[AccountAsset{asset.Addr, asset.Aidx}] = idx
+ }
+}
+
+// Dehydrate normalizes the fields of this AccountDeltas and clears any redundant internal caching.
+// This is useful for comparing AccountDeltas objects for equality.
+func (ad *AccountDeltas) Dehydrate() {
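+ // Slices are normalized to empty but non-nil, and caches to empty non-nil
+ // maps, so that two dehydrated AccountDeltas compare equal under
+ // reflect.DeepEqual regardless of how they were constructed.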
+ if ad.Accts == nil {
+ ad.Accts = []BalanceRecord{}
+ }
+ if ad.AppResources == nil {
+ ad.AppResources = []AppResourceRecord{}
+ }
+ if ad.AssetResources == nil {
+ ad.AssetResources = []AssetResourceRecord{}
+ }
+ if ad.acctsCache == nil {
+ ad.acctsCache = make(map[basics.Address]int)
+ }
+ for key := range ad.acctsCache {
+ delete(ad.acctsCache, key)
+ }
+ if ad.appResourcesCache == nil {
+ ad.appResourcesCache = make(map[AccountApp]int)
+ }
+ for key := range ad.appResourcesCache {
+ delete(ad.appResourcesCache, key)
+ }
+ if ad.assetResourcesCache == nil {
+ ad.assetResourcesCache = make(map[AccountAsset]int)
+ }
+ for key := range ad.assetResourcesCache {
+ delete(ad.assetResourcesCache, key)
+ }
+}
+
// Reset resets the StateDelta for re-use with sync.Pool
func (sd *StateDelta) Reset() {
sd.Accts.reset()
@@ -359,21 +442,17 @@ func (ad AccountDeltas) ModifiedAccounts() []basics.Address {
// MergeAccounts applies other accounts into this StateDelta accounts
func (ad *AccountDeltas) MergeAccounts(other AccountDeltas) {
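+ // Iterate over the slices rather than the cache maps so that the merge
+ // order is deterministic and matches insertion order.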
- for new := range other.Accts {
- addr := other.Accts[new].Addr
- acct := other.Accts[new].AccountData
- ad.Upsert(addr, acct)
+ for i := range other.Accts {
+ balanceRecord := &other.Accts[i]
+ ad.Upsert(balanceRecord.Addr, balanceRecord.AccountData)
}
-
- for aapp, idx := range other.appResourcesCache {
- params := other.AppResources[idx].Params
- state := other.AppResources[idx].State
- ad.UpsertAppResource(aapp.Address, aapp.App, params, state)
+ for i := range other.AppResources {
+ appResource := &other.AppResources[i]
+ ad.UpsertAppResource(appResource.Addr, appResource.Aidx, appResource.Params, appResource.State)
}
- for aapp, idx := range other.assetResourcesCache {
- params := other.AssetResources[idx].Params
- holding := other.AssetResources[idx].Holding
- ad.UpsertAssetResource(aapp.Address, aapp.Asset, params, holding)
+ for i := range other.AssetResources {
+ assetResource := &other.AssetResources[i]
+ ad.UpsertAssetResource(assetResource.Addr, assetResource.Aidx, assetResource.Params, assetResource.Holding)
}
}
diff --git a/ledger/ledgercore/statedelta_test.go b/ledger/ledgercore/statedelta_test.go
index dd0f3e201..2e683a410 100644
--- a/ledger/ledgercore/statedelta_test.go
+++ b/ledger/ledgercore/statedelta_test.go
@@ -20,6 +20,7 @@ import (
"reflect"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/crypto"
@@ -99,6 +100,339 @@ func TestAccountDeltas(t *testing.T) {
a.Equal(sample1, data)
}
+func TestAccountDeltasMergeAccountsOrder(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ addr1 := randomAddress()
+ data1 := AccountData{AccountBaseData: AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 111}}}
+ addr2 := randomAddress()
+ data2 := AccountData{AccountBaseData: AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 222}}}
+ addr3 := randomAddress()
+ data3 := AccountData{AccountBaseData: AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 333}}}
+ addr4 := randomAddress()
+ data4 := AccountData{AccountBaseData: AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 444}}}
+
+ asset1 := basics.AssetIndex(100)
+ asset1Params := AssetParamsDelta{
+ Params: &basics.AssetParams{Total: 1},
+ }
+ asset2 := basics.AssetIndex(200)
+ asset2Params := AssetParamsDelta{
+ Params: &basics.AssetParams{Total: 2},
+ }
+ asset3 := basics.AssetIndex(300)
+ asset3Params := AssetParamsDelta{
+ Params: &basics.AssetParams{Total: 3},
+ }
+ asset4 := basics.AssetIndex(400)
+ asset4Params := AssetParamsDelta{
+ Params: &basics.AssetParams{Total: 4},
+ }
+
+ app1 := basics.AppIndex(101)
+ app1Params := AppParamsDelta{
+ Params: &basics.AppParams{ApprovalProgram: []byte("app1")},
+ }
+ app2 := basics.AppIndex(201)
+ app2Params := AppParamsDelta{
+ Params: &basics.AppParams{ApprovalProgram: []byte("app2")},
+ }
+ app3 := basics.AppIndex(301)
+ app3Params := AppParamsDelta{
+ Params: &basics.AppParams{ApprovalProgram: []byte("app3")},
+ }
+ app4 := basics.AppIndex(401)
+ app4Params := AppParamsDelta{
+ Params: &basics.AppParams{ApprovalProgram: []byte("app4")},
+ }
+
+ var ad1 AccountDeltas
+ ad1.Upsert(addr1, data1)
+ ad1.Upsert(addr2, data2)
+ ad1.UpsertAssetResource(addr1, asset1, asset1Params, AssetHoldingDelta{})
+ ad1.UpsertAssetResource(addr2, asset2, asset2Params, AssetHoldingDelta{})
+ ad1.UpsertAppResource(addr1, app1, app1Params, AppLocalStateDelta{})
+ ad1.UpsertAppResource(addr2, app2, app2Params, AppLocalStateDelta{})
+
+ var ad2 AccountDeltas
+ ad2.Upsert(addr3, data3)
+ ad2.Upsert(addr4, data4)
+ ad2.UpsertAssetResource(addr3, asset3, asset3Params, AssetHoldingDelta{})
+ ad2.UpsertAssetResource(addr4, asset4, asset4Params, AssetHoldingDelta{})
+ ad2.UpsertAppResource(addr3, app3, app3Params, AppLocalStateDelta{})
+ ad2.UpsertAppResource(addr4, app4, app4Params, AppLocalStateDelta{})
+
+ // Repeat the merge several times to verify the result order is deterministic
+ for i := 0; i < 10; i++ {
+ var merged AccountDeltas
+ merged.MergeAccounts(ad1)
+ merged.MergeAccounts(ad2)
+
+ var expectedAccounts []BalanceRecord
+ expectedAccounts = append(expectedAccounts, ad1.Accts...)
+ expectedAccounts = append(expectedAccounts, ad2.Accts...)
+ require.Equal(t, expectedAccounts, merged.Accts)
+
+ var expectedAppResources []AppResourceRecord
+ expectedAppResources = append(expectedAppResources, ad1.AppResources...)
+ expectedAppResources = append(expectedAppResources, ad2.AppResources...)
+ require.Equal(t, expectedAppResources, merged.AppResources)
+
+ var expectedAssetResources []AssetResourceRecord
+ expectedAssetResources = append(expectedAssetResources, ad1.AssetResources...)
+ expectedAssetResources = append(expectedAssetResources, ad2.AssetResources...)
+ require.Equal(t, expectedAssetResources, merged.AssetResources)
+ }
+}
+
+func TestAccountDeltasDehydrateAndHydrate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ addr1 := randomAddress()
+ data1 := AccountData{AccountBaseData: AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 111}}}
+ addr2 := randomAddress()
+ data2 := AccountData{AccountBaseData: AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 222}}}
+
+ asset1 := basics.AssetIndex(100)
+ asset1Params := AssetParamsDelta{
+ Params: &basics.AssetParams{Total: 1},
+ }
+ asset2 := basics.AssetIndex(200)
+ asset2Params := AssetParamsDelta{
+ Params: &basics.AssetParams{Total: 2},
+ }
+
+ app1 := basics.AppIndex(101)
+ app1Params := AppParamsDelta{
+ Params: &basics.AppParams{ApprovalProgram: []byte("app1")},
+ }
+ app2 := basics.AppIndex(201)
+ app2Params := AppParamsDelta{
+ Params: &basics.AppParams{ApprovalProgram: []byte("app2")},
+ }
+
+ var ad AccountDeltas
+ ad.Upsert(addr1, data1)
+ ad.Upsert(addr2, data2)
+ ad.UpsertAssetResource(addr1, asset1, asset1Params, AssetHoldingDelta{})
+ ad.UpsertAssetResource(addr2, asset2, asset2Params, AssetHoldingDelta{})
+ ad.UpsertAppResource(addr1, app1, app1Params, AppLocalStateDelta{})
+ ad.UpsertAppResource(addr2, app2, app2Params, AppLocalStateDelta{})
+
+ var adCopy AccountDeltas
+ adCopy.Upsert(addr1, data1)
+ adCopy.Upsert(addr2, data2)
+ adCopy.UpsertAssetResource(addr1, asset1, asset1Params, AssetHoldingDelta{})
+ adCopy.UpsertAssetResource(addr2, asset2, asset2Params, AssetHoldingDelta{})
+ adCopy.UpsertAppResource(addr1, app1, app1Params, AppLocalStateDelta{})
+ adCopy.UpsertAppResource(addr2, app2, app2Params, AppLocalStateDelta{})
+
+ shallowAd := AccountDeltas{
+ Accts: []BalanceRecord{
+ {
+ Addr: addr1,
+ AccountData: data1,
+ },
+ {
+ Addr: addr2,
+ AccountData: data2,
+ },
+ },
+ acctsCache: make(map[basics.Address]int),
+ AssetResources: []AssetResourceRecord{
+ {
+ Aidx: asset1,
+ Addr: addr1,
+ Params: asset1Params,
+ },
+ {
+ Aidx: asset2,
+ Addr: addr2,
+ Params: asset2Params,
+ },
+ },
+ assetResourcesCache: make(map[AccountAsset]int),
+ AppResources: []AppResourceRecord{
+ {
+ Aidx: app1,
+ Addr: addr1,
+ Params: app1Params,
+ },
+ {
+ Aidx: app2,
+ Addr: addr2,
+ Params: app2Params,
+ },
+ },
+ appResourcesCache: make(map[AccountApp]int),
+ }
+
+ require.Equal(t, adCopy, ad) // should be identical
+ require.NotEqual(t, shallowAd, ad) // shallowAd has empty internal fields
+
+ ad.Dehydrate()
+
+ // Dehydration empties the internal fields
+ require.Equal(t, shallowAd, ad)
+ require.NotEqual(t, adCopy, ad)
+
+ ad.Hydrate()
+
+ // Hydration restores the internal fields
+ require.Equal(t, adCopy, ad)
+ require.NotEqual(t, shallowAd, ad)
+
+ t.Run("NewFieldDetection", func(t *testing.T) {
+ v := reflect.ValueOf(&ad).Elem()
+ st := v.Type()
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ structField := st.Field(i)
+ isContainer := field.Kind() == reflect.Map || field.Kind() == reflect.Slice
+ if isContainer || !structField.IsExported() {
+ assert.False(t, v.Field(i).IsZero(), "new container or private field \"%v\" added to AccountDeltas, please update AccountDeltas.Hydrate() and .Dehydrate() to handle it before fixing the test", structField.Name)
+ }
+ }
+ })
+}
+
+func TestStateDeltaDehydrateAndHydrate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ addr := randomAddress()
+ data := AccountData{AccountBaseData: AccountBaseData{MicroAlgos: basics.MicroAlgos{Raw: 111}}}
+
+ asset := basics.AssetIndex(100)
+ assetParams := AssetParamsDelta{
+ Params: &basics.AssetParams{Total: 1},
+ }
+
+ app := basics.AppIndex(101)
+ appParams := AppParamsDelta{
+ Params: &basics.AppParams{ApprovalProgram: []byte("app1")},
+ }
+
+ prevTimestamp := int64(77)
+ stateProofNextRound := basics.Round(88)
+ var hdr bookkeeping.BlockHeader
+
+ sd := MakeStateDelta(&hdr, prevTimestamp, 10, stateProofNextRound)
+ sd.Accts.Upsert(addr, data)
+ sd.Accts.UpsertAssetResource(addr, asset, assetParams, AssetHoldingDelta{})
+ sd.Accts.UpsertAppResource(addr, app, appParams, AppLocalStateDelta{})
+ sd.AddKvMod("key", KvValueDelta{Data: []byte("value")})
+ sd.AddCreatable(100, ModifiedCreatable{
+ Ctype: basics.AssetCreatable,
+ Created: true,
+ Creator: addr,
+ })
+ sd.AddTxLease(Txlease{Sender: addr, Lease: [32]byte{1, 2, 3}}, 2000)
+ sd.Txids = map[transactions.Txid]IncludedTransactions{
+ {5, 4, 3}: {
+ LastValid: 5,
+ },
+ }
+
+ sdCopy := MakeStateDelta(&hdr, prevTimestamp, 10, stateProofNextRound)
+ sdCopy.Accts.Upsert(addr, data)
+ sdCopy.Accts.UpsertAssetResource(addr, asset, assetParams, AssetHoldingDelta{})
+ sdCopy.Accts.UpsertAppResource(addr, app, appParams, AppLocalStateDelta{})
+ sdCopy.AddKvMod("key", KvValueDelta{Data: []byte("value")})
+ sdCopy.AddCreatable(100, ModifiedCreatable{
+ Ctype: basics.AssetCreatable,
+ Created: true,
+ Creator: addr,
+ })
+ sdCopy.AddTxLease(Txlease{Sender: addr, Lease: [32]byte{1, 2, 3}}, 2000)
+ sdCopy.Txids = map[transactions.Txid]IncludedTransactions{
+ {5, 4, 3}: {
+ LastValid: 5,
+ },
+ }
+
+ shallowSd := StateDelta{
+ PrevTimestamp: prevTimestamp,
+ StateProofNext: stateProofNextRound,
+ Hdr: &hdr,
+ Accts: AccountDeltas{
+ Accts: []BalanceRecord{
+ {
+ Addr: addr,
+ AccountData: data,
+ },
+ },
+ acctsCache: make(map[basics.Address]int),
+ AssetResources: []AssetResourceRecord{
+ {
+ Aidx: asset,
+ Addr: addr,
+ Params: assetParams,
+ },
+ },
+ assetResourcesCache: make(map[AccountAsset]int),
+ AppResources: []AppResourceRecord{
+ {
+ Aidx: app,
+ Addr: addr,
+ Params: appParams,
+ },
+ },
+ appResourcesCache: make(map[AccountApp]int),
+ },
+ KvMods: map[string]KvValueDelta{
+ "key": {Data: []byte("value")},
+ },
+ Creatables: map[basics.CreatableIndex]ModifiedCreatable{
+ 100: {
+ Ctype: basics.AssetCreatable,
+ Created: true,
+ Creator: addr,
+ },
+ },
+ Txleases: map[Txlease]basics.Round{
+ {addr, [32]byte{1, 2, 3}}: 2000,
+ },
+ Txids: map[transactions.Txid]IncludedTransactions{
+ {5, 4, 3}: {
+ LastValid: 5,
+ },
+ },
+ }
+
+ require.Equal(t, sdCopy, sd) // should be identical
+ require.NotEqual(t, shallowSd, sd) // shallowSd has empty internal fields
+
+ sd.Dehydrate()
+
+ // Dehydration empties the internal fields
+ require.Equal(t, shallowSd, sd)
+ require.NotEqual(t, sdCopy, sd)
+
+ sd.Hydrate()
+
+ // Hydration restores the internal fields, except for initialHint
+ require.NotEqual(t, sdCopy.initialHint, sd.initialHint)
+ sd.initialHint = sdCopy.initialHint
+ require.Equal(t, sdCopy, sd)
+ require.NotEqual(t, shallowSd, sd)
+
+ t.Run("NewFieldDetection", func(t *testing.T) {
+ v := reflect.ValueOf(&sd).Elem()
+ st := v.Type()
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ structField := st.Field(i)
+ isContainer := field.Kind() == reflect.Map || field.Kind() == reflect.Slice
+ if isContainer || !structField.IsExported() {
+ assert.False(t, v.Field(i).IsZero(), "new container or private field \"%v\" added to StateDelta, please update StateDelta.Hydrate() and .Dehydrate() to handle it before fixing the test", structField.Name)
+ }
+ }
+ })
+}
+
func TestMakeStateDeltaMaps(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -201,7 +535,7 @@ func TestStateDeltaReflect(t *testing.T) {
st := v.Type()
for i := 0; i < v.NumField(); i++ {
reflectedStateDeltaName := st.Field(i).Name
- require.Containsf(t, stateDeltaFieldNames, reflectedStateDeltaName, "new field:\"%v\" added to StateDelta, please update StateDelta.Reset() to handle it before fixing the test", reflectedStateDeltaName)
+ assert.Containsf(t, stateDeltaFieldNames, reflectedStateDeltaName, "new field:\"%v\" added to StateDelta, please update StateDelta.Reset() to handle it before fixing the test", reflectedStateDeltaName)
}
}
@@ -222,7 +556,7 @@ func TestAccountDeltaReflect(t *testing.T) {
st := v.Type()
for i := 0; i < v.NumField(); i++ {
reflectedAccountDeltaName := st.Field(i).Name
- require.Containsf(t, AccountDeltaFieldNames, reflectedAccountDeltaName, "new field:\"%v\" added to AccountDeltas, please update AccountDeltas.reset() to handle it before fixing the test", reflectedAccountDeltaName)
+ assert.Containsf(t, AccountDeltaFieldNames, reflectedAccountDeltaName, "new field:\"%v\" added to AccountDeltas, please update AccountDeltas.reset() to handle it before fixing the test", reflectedAccountDeltaName)
}
}
diff --git a/ledger/ledgercore/stateproofverification.go b/ledger/ledgercore/stateproofverification.go
new file mode 100644
index 000000000..0220185a2
--- /dev/null
+++ b/ledger/ledgercore/stateproofverification.go
@@ -0,0 +1,51 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledgercore
+
+import (
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// StateProofVerificationContext represents the context provided by the ledger to verify a state proof transaction.
+type StateProofVerificationContext struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ // LastAttestedRound is the last attested round of the state proof verified using this data.
+ LastAttestedRound basics.Round `codec:"spround"`
+
+ // VotersCommitment is the vector commitment root of the top N accounts that will sign the next state proof.
+ VotersCommitment crypto.GenericDigest `codec:"vc"`
+
+ // OnlineTotalWeight is the total amount of stake attesting to the next state proof.
+ OnlineTotalWeight basics.MicroAlgos `codec:"pw"`
+
+ // Version is the protocol version that would be used to verify the state proof.
+ Version protocol.ConsensusVersion `codec:"v"`
+}
+
+// MakeStateProofVerificationContext produces a new StateProofVerificationContext instance from a voters block header and the last attested round.
+func MakeStateProofVerificationContext(votersHdr *bookkeeping.BlockHeader, lastAttested basics.Round) *StateProofVerificationContext {
+ return &StateProofVerificationContext{
+ VotersCommitment: votersHdr.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment,
+ OnlineTotalWeight: votersHdr.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight,
+ LastAttestedRound: lastAttested,
+ Version: votersHdr.CurrentProtocol,
+ }
+}
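+
+// An illustrative usage sketch (votersRound and lastAttested are assumed
+// inputs; any ledger exposing BlockHdr would do):
+//
+//	hdr, err := ledger.BlockHdr(votersRound)
+//	if err != nil {
+//		return nil, err
+//	}
+//	ctx := MakeStateProofVerificationContext(&hdr, lastAttested)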
diff --git a/ledger/ledgercore/votersForRound.go b/ledger/ledgercore/votersForRound.go
index c1afd9eb5..94901bf20 100644
--- a/ledger/ledgercore/votersForRound.go
+++ b/ledger/ledgercore/votersForRound.go
@@ -39,6 +39,19 @@ type OnlineAccountsFetcher interface {
TopOnlineAccounts(rnd basics.Round, voteRnd basics.Round, n uint64, params *config.ConsensusParams, rewardsLevel uint64) (topOnlineAccounts []*OnlineAccount, totalOnlineStake basics.MicroAlgos, err error)
}
+// LedgerForSPBuilder captures the functionality needed for the creation of the cryptographic state proof builder.
+type LedgerForSPBuilder interface {
+ VotersForStateProof(rnd basics.Round) (*VotersForRound, error)
+ BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
+}
+
+// VotersCommitListener represents an object that needs to be notified of commit stages in the voters tracker.
+type VotersCommitListener interface {
+ // OnPrepareVoterCommit gives the listener the opportunity to back up VotersForRound data for rounds in (oldBase, newBase] before it is removed.
+ // The implementation should log any errors that might occur.
+ OnPrepareVoterCommit(oldBase basics.Round, newBase basics.Round, voters LedgerForSPBuilder)
+}
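+
+// An illustrative implementer sketch (spCommitBackup and its behavior are
+// assumptions, not part of the tracker):
+//
+//	type spCommitBackup struct{ log logging.Logger }
+//
+//	func (b *spCommitBackup) OnPrepareVoterCommit(oldBase, newBase basics.Round, voters LedgerForSPBuilder) {
+//		if _, err := voters.VotersForStateProof(newBase); err != nil {
+//			b.log.Warnf("could not back up voters for rounds (%d, %d]: %v", oldBase, newBase, err)
+//		}
+//	}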
+
// VotersForRound tracks the top online voting accounts as of a particular
// round, along with a Merkle tree commitment to those voting accounts.
type VotersForRound struct {
diff --git a/ledger/metrics.go b/ledger/metrics.go
index 16030c95f..49375ed83 100644
--- a/ledger/metrics.go
+++ b/ledger/metrics.go
@@ -80,7 +80,7 @@ func (mt *metricsTracker) postCommit(ctx context.Context, dcc *deferredCommitCon
func (mt *metricsTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
}
-func (mt *metricsTracker) handleUnorderedCommit(*deferredCommitContext) {
+func (mt *metricsTracker) handleUnorderedCommitOrError(*deferredCommitContext) {
}
func (mt *metricsTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go
index 39c489e28..4164a526d 100644
--- a/ledger/msgp_gen.go
+++ b/ledger/msgp_gen.go
@@ -6,6 +6,7 @@ import (
"github.com/algorand/msgp/msgp"
"github.com/algorand/go-algorand/ledger/encoded"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
)
// The following msgp objects are implemented in this file:
@@ -41,6 +42,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// catchpointStateProofVerificationContext
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// MarshalMsg implements msgp.Marshaler
func (z CatchpointCatchupState) MarshalMsg(b []byte) (o []byte) {
@@ -769,3 +778,161 @@ func (z *catchpointFileChunkV6) Msgsize() (s int) {
func (z *catchpointFileChunkV6) MsgIsZero() bool {
return (len((*z).Balances) == 0) && (len((*z).KVs) == 0)
}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *catchpointStateProofVerificationContext) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0002Len := uint32(1)
+ var zb0002Mask uint8 /* 2 bits */
+ if len((*z).Data) == 0 {
+ zb0002Len--
+ zb0002Mask |= 0x2
+ }
+ // variable map header, size zb0002Len
+ o = append(o, 0x80|uint8(zb0002Len))
+ if zb0002Len != 0 {
+ if (zb0002Mask & 0x2) == 0 { // if not empty
+ // string "spd"
+ o = append(o, 0xa3, 0x73, 0x70, 0x64)
+ if (*z).Data == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Data)))
+ }
+ for zb0001 := range (*z).Data {
+ o = (*z).Data[zb0001].MarshalMsg(o)
+ }
+ }
+ }
+ return
+}
+
+func (_ *catchpointStateProofVerificationContext) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*catchpointStateProofVerificationContext)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *catchpointStateProofVerificationContext) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0002 int
+ var zb0003 bool
+ zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > 0 {
+ zb0002--
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Data")
+ return
+ }
+ if zb0004 > SPContextPerCatchpointFile {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(SPContextPerCatchpointFile))
+ err = msgp.WrapError(err, "struct-from-array", "Data")
+ return
+ }
+ if zb0005 {
+ (*z).Data = nil
+ } else if (*z).Data != nil && cap((*z).Data) >= zb0004 {
+ (*z).Data = ((*z).Data)[:zb0004]
+ } else {
+ (*z).Data = make([]ledgercore.StateProofVerificationContext, zb0004)
+ }
+ for zb0001 := range (*z).Data {
+ bts, err = (*z).Data[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Data", zb0001)
+ return
+ }
+ }
+ }
+ if zb0002 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0002)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 {
+ (*z) = catchpointStateProofVerificationContext{}
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "spd":
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Data")
+ return
+ }
+ if zb0006 > SPContextPerCatchpointFile {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(SPContextPerCatchpointFile))
+ err = msgp.WrapError(err, "Data")
+ return
+ }
+ if zb0007 {
+ (*z).Data = nil
+ } else if (*z).Data != nil && cap((*z).Data) >= zb0006 {
+ (*z).Data = ((*z).Data)[:zb0006]
+ } else {
+ (*z).Data = make([]ledgercore.StateProofVerificationContext, zb0006)
+ }
+ for zb0001 := range (*z).Data {
+ bts, err = (*z).Data[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Data", zb0001)
+ return
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *catchpointStateProofVerificationContext) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*catchpointStateProofVerificationContext)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *catchpointStateProofVerificationContext) Msgsize() (s int) {
+ s = 1 + 4 + msgp.ArrayHeaderSize
+ for zb0001 := range (*z).Data {
+ s += (*z).Data[zb0001].Msgsize()
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *catchpointStateProofVerificationContext) MsgIsZero() bool {
+ return (len((*z).Data) == 0)
+}
diff --git a/ledger/msgp_gen_test.go b/ledger/msgp_gen_test.go
index de29b5f11..729d6efef 100644
--- a/ledger/msgp_gen_test.go
+++ b/ledger/msgp_gen_test.go
@@ -193,3 +193,63 @@ func BenchmarkUnmarshalcatchpointFileChunkV6(b *testing.B) {
}
}
}
+
+func TestMarshalUnmarshalcatchpointStateProofVerificationContext(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := catchpointStateProofVerificationContext{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingcatchpointStateProofVerificationContext(t *testing.T) {
+ protocol.RunEncodingTest(t, &catchpointStateProofVerificationContext{})
+}
+
+func BenchmarkMarshalMsgcatchpointStateProofVerificationContext(b *testing.B) {
+ v := catchpointStateProofVerificationContext{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgcatchpointStateProofVerificationContext(b *testing.B) {
+ v := catchpointStateProofVerificationContext{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalcatchpointStateProofVerificationContext(b *testing.B) {
+ v := catchpointStateProofVerificationContext{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/ledger/notifier.go b/ledger/notifier.go
index 9a2a08f95..d89badce7 100644
--- a/ledger/notifier.go
+++ b/ledger/notifier.go
@@ -123,7 +123,7 @@ func (bn *blockNotifier) postCommit(ctx context.Context, dcc *deferredCommitCont
func (bn *blockNotifier) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
}
-func (bn *blockNotifier) handleUnorderedCommit(*deferredCommitContext) {
+func (bn *blockNotifier) handleUnorderedCommitOrError(*deferredCommitContext) {
}
func (bn *blockNotifier) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
diff --git a/ledger/simple_test.go b/ledger/simple_test.go
index d4e44f0c3..1f4c61290 100644
--- a/ledger/simple_test.go
+++ b/ledger/simple_test.go
@@ -28,27 +28,47 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/stretchr/testify/require"
)
-func newSimpleLedgerWithConsensusVersion(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, cfg config.Local) *Ledger {
+type simpleLedgerCfg struct {
+ onDisk bool // default is in-memory
+ notArchival bool // default is archival
+}
+
+type simpleLedgerOption func(*simpleLedgerCfg)
+
+func simpleLedgerOnDisk() simpleLedgerOption {
+ return func(cfg *simpleLedgerCfg) { cfg.onDisk = true }
+}
+
+func simpleLedgerNotArchival() simpleLedgerOption {
+ return func(cfg *simpleLedgerCfg) { cfg.notArchival = true }
+}
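+
+// Usage sketch: with no options the ledger is in-memory and archival; either
+// default can be flipped per test, e.g.
+//
+//	l := newSimpleLedgerWithConsensusVersion(t, balances, cv, cfg,
+//		simpleLedgerOnDisk(), simpleLedgerNotArchival())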
+
+func newSimpleLedgerWithConsensusVersion(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, cfg config.Local, opts ...simpleLedgerOption) *Ledger {
var genHash crypto.Digest
crypto.RandBytes(genHash[:])
- return newSimpleLedgerFull(t, balances, cv, genHash, cfg)
+ return newSimpleLedgerFull(t, balances, cv, genHash, cfg, opts...)
}
-func newSimpleLedgerFull(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, genHash crypto.Digest, cfg config.Local) *Ledger {
+func newSimpleLedgerFull(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, genHash crypto.Digest, cfg config.Local, opts ...simpleLedgerOption) *Ledger {
+ var slCfg simpleLedgerCfg
+ for _, opt := range opts {
+ opt(&slCfg)
+ }
genBlock, err := bookkeeping.MakeGenesisBlock(cv, balances, "test", genHash)
require.NoError(t, err)
require.False(t, genBlock.FeeSink.IsZero())
require.False(t, genBlock.RewardsPool.IsZero())
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
- cfg.Archival = true
- l, err := OpenLedger(logging.Base(), dbName, true, ledgercore.InitState{
+ dbName = strings.Replace(dbName, "/", "_", -1)
+ cfg.Archival = !slCfg.notArchival
+ l, err := OpenLedger(logging.Base(), dbName, !slCfg.onDisk, ledgercore.InitState{
Block: genBlock,
Accounts: balances.Balances,
GenesisHash: genHash,
@@ -58,14 +78,14 @@ func newSimpleLedgerFull(t testing.TB, balances bookkeeping.GenesisBalances, cv
}
// nextBlock begins evaluation of a new block, after ledger creation or endBlock()
-func nextBlock(t testing.TB, ledger *Ledger) *internal.BlockEvaluator {
+func nextBlock(t testing.TB, ledger *Ledger) *eval.BlockEvaluator {
rnd := ledger.Latest()
hdr, err := ledger.BlockHdr(rnd)
require.NoError(t, err)
nextHdr := bookkeeping.MakeBlock(hdr).BlockHeader
nextHdr.TimeStamp = hdr.TimeStamp + 1 // ensure deterministic tests
- eval, err := internal.StartEvaluator(ledger, nextHdr, internal.EvaluatorOptions{
+ eval, err := eval.StartEvaluator(ledger, nextHdr, eval.EvaluatorOptions{
Generate: true,
Validate: true, // Do the complete checks that a new txn would be subject to
})
@@ -73,7 +93,7 @@ func nextBlock(t testing.TB, ledger *Ledger) *internal.BlockEvaluator {
return eval
}
-func fillDefaults(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn) {
+func fillDefaults(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txn *txntest.Txn) {
if txn.GenesisHash.IsZero() && ledger.GenesisProto().SupportGenesisHash {
txn.GenesisHash = ledger.GenesisHash()
}
@@ -84,14 +104,14 @@ func fillDefaults(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, t
txn.FillDefaults(ledger.GenesisProto())
}
-func txns(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) {
+func txns(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txns ...*txntest.Txn) {
t.Helper()
for _, txn1 := range txns {
txn(t, ledger, eval, txn1)
}
}
-func txn(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txn *txntest.Txn, problem ...string) {
+func txn(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txn *txntest.Txn, problem ...string) {
t.Helper()
fillDefaults(t, ledger, eval, txn)
err := eval.Transaction(txn.SignedTxn(), transactions.ApplyData{})
@@ -106,7 +126,7 @@ func txn(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txn *txnte
require.True(t, len(problem) == 0 || problem[0] == "")
}
-func txgroup(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txns ...*txntest.Txn) error {
+func txgroup(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txns ...*txntest.Txn) error {
t.Helper()
for _, txn := range txns {
fillDefaults(t, ledger, eval, txn)
@@ -117,7 +137,7 @@ func txgroup(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator, txns .
}
// endBlock completes the block being created, returns the ValidatedBlock for inspection
-func endBlock(t testing.TB, ledger *Ledger, eval *internal.BlockEvaluator) *ledgercore.ValidatedBlock {
+func endBlock(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator) *ledgercore.ValidatedBlock {
validatedBlock, err := eval.GenerateBlock()
require.NoError(t, err)
err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
@@ -180,3 +200,19 @@ func asaParams(t testing.TB, ledger *Ledger, asset basics.AssetIndex) (basics.As
}
return basics.AssetParams{}, fmt.Errorf("bad lookup (%d)", asset)
}
+
+// globals gets the AppParams for an address, app index pair (only works if addr is the creator)
+func globals(t testing.TB, ledger *Ledger, addr basics.Address, app basics.AppIndex) (basics.AppParams, bool) {
+ if globals, ok := lookup(t, ledger, addr).AppParams[app]; ok {
+ return globals, true
+ }
+ return basics.AppParams{}, false
+}
+
+// locals gets the AppLocalState for an address, app index pair
+func locals(t testing.TB, ledger *Ledger, addr basics.Address, app basics.AppIndex) (basics.AppLocalState, bool) {
+ if locals, ok := lookup(t, ledger, addr).AppLocalStates[app]; ok {
+ return locals, true
+ }
+ return basics.AppLocalState{}, false
+}
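+
+// Usage sketch for the helpers above (addresses and appID are assumed; globals
+// succeeds only when the address is the app's creator):
+//
+//	if params, ok := globals(t, ledger, creator, appID); ok { _ = params }
+//	if state, ok := locals(t, ledger, user, appID); ok { _ = state }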
diff --git a/ledger/simulation/simulation_eval_test.go b/ledger/simulation/simulation_eval_test.go
index 0af8b5ae0..ee5c800f3 100644
--- a/ledger/simulation/simulation_eval_test.go
+++ b/ledger/simulation/simulation_eval_test.go
@@ -20,6 +20,7 @@ import (
"encoding/binary"
"encoding/hex"
"fmt"
+ "strings"
"testing"
"github.com/algorand/go-algorand/crypto"
@@ -38,23 +39,6 @@ import (
"github.com/stretchr/testify/require"
)
-// attachGroupID calculates and assigns the ID for a transaction group.
-// Mutates the group directly.
-func attachGroupID(txns []transactions.SignedTxn) {
- txgroup := transactions.TxGroup{
- TxGroupHashes: make([]crypto.Digest, len(txns)),
- }
- for i, txn := range txns {
- txn.Txn.Group = crypto.Digest{}
- txgroup.TxGroupHashes[i] = crypto.Digest(txn.ID())
- }
- group := crypto.HashObj(txgroup)
-
- for i := range txns {
- txns[i].Txn.Header.Group = group
- }
-}
-
func uint64ToBytes(num uint64) []byte {
ibytes := make([]byte, 8)
binary.BigEndian.PutUint64(ibytes, num)
@@ -62,7 +46,7 @@ func uint64ToBytes(num uint64) []byte {
}
type simulationTestCase struct {
- input []transactions.SignedTxn
+ input simulation.Request
expected simulation.Result
expectedError string
}
@@ -101,21 +85,13 @@ func normalizeEvalDeltas(t *testing.T, actual, expected *transactions.EvalDelta)
func validateSimulationResult(t *testing.T, result simulation.Result) {
t.Helper()
- shouldHaveBlock := true
- if !result.WouldSucceed {
- // WouldSucceed might be false because of missing signatures, in which case a block would
- // still be generated. The only reason for no block would be an eval error.
- for _, groupResult := range result.TxnGroups {
- if len(groupResult.FailureMessage) != 0 {
- shouldHaveBlock = false
- break
- }
+ for _, groupResult := range result.TxnGroups {
+ if len(groupResult.FailureMessage) != 0 {
+ // The only reason for no block is an eval error.
+ assert.Nil(t, result.Block)
+ return
}
}
- if !shouldHaveBlock {
- assert.Nil(t, result.Block)
- return
- }
require.NotNil(t, result.Block)
blockGroups, err := result.Block.Block().DecodePaysetGroups()
@@ -142,34 +118,34 @@ func validateSimulationResult(t *testing.T, result simulation.Result) {
}
}
-func simulationTest(t *testing.T, f func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase) {
+func simulationTest(t *testing.T, f func(env simulationtesting.Environment) simulationTestCase) {
t.Helper()
- l, accounts, txnInfo := simulationtesting.PrepareSimulatorTest(t)
- defer l.Close()
- s := simulation.MakeSimulator(l)
+ env := simulationtesting.PrepareSimulatorTest(t)
+ defer env.Close()
+ s := simulation.MakeSimulator(env.Ledger)
- testcase := f(accounts, txnInfo)
+ testcase := f(env)
actual, err := s.Simulate(testcase.input)
require.NoError(t, err)
validateSimulationResult(t, actual)
- require.Len(t, testcase.expected.TxnGroups, 1, "Test case must expect a single txn group")
- require.Len(t, testcase.expected.TxnGroups[0].Txns, len(testcase.input), "Test case expected a different number of transactions than its input")
+ require.Len(t, testcase.expected.TxnGroups, len(testcase.input.TxnGroups), "Test case must expect the same number of transaction groups as its input")
- for i, inputTxn := range testcase.input {
- if testcase.expected.TxnGroups[0].Txns[i].Txn.Txn.Type == "" {
- // Use Type as a marker for whether the transaction was specified or not. If not
- // specified, replace it with the input txn
- testcase.expected.TxnGroups[0].Txns[i].Txn.SignedTxn = inputTxn
+ for i := range testcase.input.TxnGroups {
+ for j := range testcase.input.TxnGroups[i] {
+ if testcase.expected.TxnGroups[i].Txns[j].Txn.Txn.Type == "" {
+ // Use Type as a marker for whether the transaction was specified or not. If not
+ // specified, replace it with the input txn
+ testcase.expected.TxnGroups[i].Txns[j].Txn.SignedTxn = testcase.input.TxnGroups[i][j]
+ }
+ normalizeEvalDeltas(t, &actual.TxnGroups[i].Txns[j].Txn.EvalDelta, &testcase.expected.TxnGroups[i].Txns[j].Txn.EvalDelta)
}
- normalizeEvalDeltas(t, &actual.TxnGroups[0].Txns[i].Txn.EvalDelta, &testcase.expected.TxnGroups[0].Txns[i].Txn.EvalDelta)
}
if len(testcase.expectedError) != 0 {
require.Contains(t, actual.TxnGroups[0].FailureMessage, testcase.expectedError)
- require.False(t, testcase.expected.WouldSucceed, "Test case WouldSucceed value is not consistent with expected failure")
// if it matched the expected error, copy the actual one so it will pass the equality check below
testcase.expected.TxnGroups[0].FailureMessage = actual.TxnGroups[0].FailureMessage
}
@@ -185,192 +161,153 @@ func TestPayTxn(t *testing.T) {
t.Run("simple", func(t *testing.T) {
t.Parallel()
- for _, signed := range []bool{true, false} {
- signed := signed
- t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) {
- t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
- receiver := accounts[1]
-
- txn := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.PaymentTx,
- Sender: sender.Addr,
- Receiver: receiver.Addr,
- Amount: 1_000_000,
- }).SignedTxn()
-
- if signed {
- txn = txn.Txn.Sign(sender.Sk)
- }
-
- return simulationTestCase{
- input: []transactions.SignedTxn{txn},
- expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
- TxnGroups: []simulation.TxnGroupResult{
- {
- Txns: []simulation.TxnResult{
- {
- MissingSignature: !signed,
- },
- },
- },
- },
- WouldSucceed: signed,
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+ receiver := env.Accounts[1]
+
+ txn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: receiver.Addr,
+ Amount: 1_000_000,
+ }).Txn().Sign(sender.Sk)
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{{txn}},
+ },
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{{}},
},
- }
- })
- })
- }
+ },
+ },
+ }
+ })
})
t.Run("close to", func(t *testing.T) {
t.Parallel()
- for _, signed := range []bool{true, false} {
- signed := signed
- t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) {
- t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
- receiver := accounts[1]
- closeTo := accounts[2]
- amount := uint64(1_000_000)
-
- txn := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.PaymentTx,
- Sender: sender.Addr,
- Receiver: receiver.Addr,
- Amount: amount,
- CloseRemainderTo: closeTo.Addr,
- }).SignedTxn()
-
- if signed {
- txn = txn.Txn.Sign(sender.Sk)
- }
-
- expectedClosingAmount := sender.AcctData.MicroAlgos.Raw
- expectedClosingAmount -= amount + txn.Txn.Fee.Raw
-
- return simulationTestCase{
- input: []transactions.SignedTxn{txn},
- expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
- TxnGroups: []simulation.TxnGroupResult{
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+ receiver := env.Accounts[1]
+ closeTo := env.Accounts[2]
+ amount := uint64(1_000_000)
+
+ txn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: receiver.Addr,
+ Amount: amount,
+ CloseRemainderTo: closeTo.Addr,
+ }).Txn().Sign(sender.Sk)
+
+ expectedClosingAmount := sender.AcctData.MicroAlgos.Raw
+ expectedClosingAmount -= amount + txn.Txn.Fee.Raw
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{{txn}},
+ },
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
{
- Txns: []simulation.TxnResult{
- {
- Txn: transactions.SignedTxnWithAD{
- ApplyData: transactions.ApplyData{
- ClosingAmount: basics.MicroAlgos{Raw: expectedClosingAmount},
- },
- },
- MissingSignature: !signed,
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ClosingAmount: basics.MicroAlgos{Raw: expectedClosingAmount},
},
},
},
},
- WouldSucceed: signed,
},
- }
- })
- })
- }
+ },
+ },
+ }
+ })
})
t.Run("overspend", func(t *testing.T) {
t.Parallel()
- for _, signed := range []bool{true, false} {
- signed := signed
- t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) {
- t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
- receiver := accounts[1]
- amount := sender.AcctData.MicroAlgos.Raw + 100
-
- txn := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.PaymentTx,
- Sender: sender.Addr,
- Receiver: receiver.Addr,
- Amount: amount,
- }).SignedTxn()
-
- if signed {
- txn = txn.Txn.Sign(sender.Sk)
- }
-
- return simulationTestCase{
- input: []transactions.SignedTxn{txn},
- expectedError: fmt.Sprintf("tried to spend {%d}", amount),
- expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
- TxnGroups: []simulation.TxnGroupResult{
- {
- Txns: []simulation.TxnResult{
- {
- MissingSignature: !signed,
- },
- },
- FailedAt: simulation.TxnPath{0},
- },
- },
- WouldSucceed: false,
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+ receiver := env.Accounts[1]
+ amount := sender.AcctData.MicroAlgos.Raw + 100
+
+ txn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: receiver.Addr,
+ Amount: amount,
+ }).Txn().Sign(sender.Sk)
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{{txn}},
+ },
+ expectedError: fmt.Sprintf("tried to spend {%d}", amount),
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{{}},
+ FailedAt: simulation.TxnPath{0},
},
- }
- })
- })
- }
+ },
+ },
+ }
+ })
})
}
func TestWrongAuthorizerTxn(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
-
- for _, signed := range []bool{true, false} {
- signed := signed
- t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) {
+ for _, optionalSigs := range []bool{false, true} {
+ optionalSigs := optionalSigs
+ t.Run(fmt.Sprintf("optionalSigs=%t", optionalSigs), func(t *testing.T) {
t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
- authority := accounts[1]
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+ authority := env.Accounts[1]
- txn := txnInfo.NewTxn(txntest.Txn{
+ txn := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
Sender: sender.Addr,
Receiver: sender.Addr,
Amount: 0,
- })
+ }).Txn().Sign(authority.Sk)
- var stxn transactions.SignedTxn
- if signed {
- stxn = txn.Txn().Sign(authority.Sk)
- } else {
- stxn = txn.SignedTxn()
- stxn.AuthAddr = authority.Addr
+ if optionalSigs {
+ // erase signature
+ txn.Sig = crypto.Signature{}
}
return simulationTestCase{
- input: []transactions.SignedTxn{stxn},
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{{txn}},
+ AllowEmptySignatures: optionalSigs,
+ },
expectedError: fmt.Sprintf("should have been authorized by %s but was actually authorized by %s", sender.Addr, authority.Addr),
expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
TxnGroups: []simulation.TxnGroupResult{
{
- Txns: []simulation.TxnResult{
- {
- MissingSignature: !signed,
- },
- },
+ Txns: []simulation.TxnResult{{}},
FailedAt: simulation.TxnPath{0},
},
},
- WouldSucceed: false,
+ EvalOverrides: simulation.ResultEvalOverrides{
+ AllowEmptySignatures: optionalSigs,
+ },
},
}
})
@@ -381,82 +318,67 @@ func TestWrongAuthorizerTxn(t *testing.T) {
func TestRekey(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+ authority := env.Accounts[1]
- for _, signed := range []bool{true, false} {
- signed := signed
- t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) {
- t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
- authority := accounts[1]
+ txn1 := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: sender.Addr,
+ Amount: 1,
+ RekeyTo: authority.Addr,
+ })
+ txn2 := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: sender.Addr,
+ Amount: 2,
+ })
- txn1 := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.PaymentTx,
- Sender: sender.Addr,
- Receiver: sender.Addr,
- Amount: 1,
- RekeyTo: authority.Addr,
- })
- txn2 := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.PaymentTx,
- Sender: sender.Addr,
- Receiver: sender.Addr,
- Amount: 2,
- })
+ txntest.Group(&txn1, &txn2)
- txntest.Group(&txn1, &txn2)
+ stxn1 := txn1.Txn().Sign(sender.Sk)
+ stxn2 := txn2.Txn().Sign(authority.Sk)
- var stxn1 transactions.SignedTxn
- var stxn2 transactions.SignedTxn
- if signed {
- stxn1 = txn1.Txn().Sign(sender.Sk)
- stxn2 = txn2.Txn().Sign(authority.Sk)
- } else {
- stxn1 = txn1.SignedTxn()
- stxn2 = txn2.SignedTxn()
- stxn2.AuthAddr = authority.Addr
- }
- return simulationTestCase{
- input: []transactions.SignedTxn{stxn1, stxn2},
- expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
- TxnGroups: []simulation.TxnGroupResult{
- {
- Txns: []simulation.TxnResult{
- {
- MissingSignature: !signed,
- },
- {
- MissingSignature: !signed,
- },
- },
- },
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {stxn1, stxn2},
+ },
+ },
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {},
+ {},
},
- WouldSucceed: signed,
},
- }
- })
- })
- }
+ },
+ },
+ }
+ })
}
func TestStateProofTxn(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- l, _, txnInfo := simulationtesting.PrepareSimulatorTest(t)
- defer l.Close()
- s := simulation.MakeSimulator(l)
+ env := simulationtesting.PrepareSimulatorTest(t)
+ defer env.Close()
+ s := simulation.MakeSimulator(env.Ledger)
txgroup := []transactions.SignedTxn{
- txnInfo.NewTxn(txntest.Txn{
+ env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.StateProofTx,
// No need to fill out StateProofTxnFields, this should fail at signature verification
}).SignedTxn(),
}
- _, err := s.Simulate(txgroup)
+ _, err := s.Simulate(simulation.Request{TxnGroups: [][]transactions.SignedTxn{txgroup}})
require.ErrorContains(t, err, "cannot simulate StateProof transactions")
}
@@ -464,64 +386,75 @@ func TestSimpleGroupTxn(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- l, accounts, txnInfo := simulationtesting.PrepareSimulatorTest(t)
- defer l.Close()
- s := simulation.MakeSimulator(l)
- sender1 := accounts[0].Addr
- sender1Balance := accounts[0].AcctData.MicroAlgos
- sender2 := accounts[1].Addr
- sender2Balance := accounts[1].AcctData.MicroAlgos
+ env := simulationtesting.PrepareSimulatorTest(t)
+ defer env.Close()
+ s := simulation.MakeSimulator(env.Ledger)
+ sender1 := env.Accounts[0]
+ sender1Balance := env.Accounts[0].AcctData.MicroAlgos
+ sender2 := env.Accounts[1]
+ sender2Balance := env.Accounts[1].AcctData.MicroAlgos
// Send money back and forth
- txgroup := []transactions.SignedTxn{
- txnInfo.NewTxn(txntest.Txn{
- Type: protocol.PaymentTx,
- Sender: sender1,
- Receiver: sender2,
- Amount: 1_000_000,
- }).SignedTxn(),
- txnInfo.NewTxn(txntest.Txn{
- Type: protocol.PaymentTx,
- Sender: sender2,
- Receiver: sender1,
- Amount: 0,
- }).SignedTxn(),
+ txn1 := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender1.Addr,
+ Receiver: sender2.Addr,
+ Amount: 1_000_000,
+ })
+ txn2 := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender2.Addr,
+ Receiver: sender1.Addr,
+ Amount: 0,
+ })
+
+ request := simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {
+ txn1.Txn().Sign(sender1.Sk),
+ txn2.Txn().Sign(sender2.Sk),
+ },
+ },
}
// Should fail if there is no group parameter
- result, err := s.Simulate(txgroup)
+ result, err := s.Simulate(request)
require.NoError(t, err)
- require.False(t, result.WouldSucceed)
require.Len(t, result.TxnGroups, 1)
require.Len(t, result.TxnGroups[0].Txns, 2)
require.Contains(t, result.TxnGroups[0].FailureMessage, "had zero Group but was submitted in a group of 2")
- // Add group parameter
- attachGroupID(txgroup)
+ // Add group parameter and sign again
+ txntest.Group(&txn1, &txn2)
+ request.TxnGroups = [][]transactions.SignedTxn{
+ {
+ txn1.Txn().Sign(sender1.Sk),
+ txn2.Txn().Sign(sender2.Sk),
+ },
+ }
// Check balances before transaction
- sender1Data, _, err := l.LookupWithoutRewards(l.Latest(), sender1)
+ sender1Data, _, err := env.Ledger.LookupWithoutRewards(env.Ledger.Latest(), sender1.Addr)
require.NoError(t, err)
require.Equal(t, sender1Balance, sender1Data.MicroAlgos)
- sender2Data, _, err := l.LookupWithoutRewards(l.Latest(), sender2)
+ sender2Data, _, err := env.Ledger.LookupWithoutRewards(env.Ledger.Latest(), sender2.Addr)
require.NoError(t, err)
require.Equal(t, sender2Balance, sender2Data.MicroAlgos)
// Should now pass
- result, err = s.Simulate(txgroup)
+ result, err = s.Simulate(request)
require.NoError(t, err)
- require.False(t, result.WouldSucceed)
require.Len(t, result.TxnGroups, 1)
require.Len(t, result.TxnGroups[0].Txns, 2)
require.Zero(t, result.TxnGroups[0].FailureMessage)
// Confirm balances have not changed
- sender1Data, _, err = l.LookupWithoutRewards(l.Latest(), sender1)
+ sender1Data, _, err = env.Ledger.LookupWithoutRewards(env.Ledger.Latest(), sender1.Addr)
require.NoError(t, err)
require.Equal(t, sender1Balance, sender1Data.MicroAlgos)
- sender2Data, _, err = l.LookupWithoutRewards(l.Latest(), sender2)
+ sender2Data, _, err = env.Ledger.LookupWithoutRewards(env.Ledger.Latest(), sender2.Addr)
require.NoError(t, err)
require.Equal(t, sender2Balance, sender2Data.MicroAlgos)
}
@@ -541,21 +474,25 @@ btoi`)
name string
arguments [][]byte
expectedError string
+ cost uint64
}{
{
name: "approval",
arguments: [][]byte{{1}},
expectedError: "", // no error
+ cost: 2,
},
{
name: "rejection",
arguments: [][]byte{{0}},
expectedError: "rejected by logic",
+ cost: 2,
},
{
name: "error",
arguments: [][]byte{},
expectedError: "rejected by logic err=cannot load arg[0] of 0",
+ cost: 1,
},
}
@@ -563,16 +500,16 @@ btoi`)
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
- payTxn := txnInfo.NewTxn(txntest.Txn{
+ payTxn := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
Sender: sender.Addr,
Receiver: lsigAddr,
Amount: 1_000_000,
})
- appCallTxn := txnInfo.NewTxn(txntest.Txn{
+ appCallTxn := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: lsigAddr,
ApprovalProgram: `#pragma version 8
@@ -595,22 +532,29 @@ int 1`,
expectedSuccess := len(testCase.expectedError) == 0
var expectedAppCallAD transactions.ApplyData
expectedFailedAt := simulation.TxnPath{1}
+ var appBudgetConsumed, appBudgetAdded uint64
if expectedSuccess {
expectedAppCallAD = transactions.ApplyData{
- ApplicationID: 2,
+ ApplicationID: 1002,
EvalDelta: transactions.EvalDelta{
Logs: []string{"hello"},
},
}
expectedFailedAt = nil
+ appBudgetConsumed = 3
+ appBudgetAdded = 700
}
return simulationTestCase{
- input: []transactions.SignedTxn{signedPayTxn, signedAppCallTxn},
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedPayTxn, signedAppCallTxn},
+ },
+ },
expectedError: testCase.expectedError,
expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
TxnGroups: []simulation.TxnGroupResult{
{
Txns: []simulation.TxnResult{
@@ -619,12 +563,15 @@ int 1`,
Txn: transactions.SignedTxnWithAD{
ApplyData: expectedAppCallAD,
},
+ AppBudgetConsumed: appBudgetConsumed,
+ LogicSigBudgetConsumed: testCase.cost,
},
},
- FailedAt: expectedFailedAt,
+ FailedAt: expectedFailedAt,
+ AppBudgetAdded: appBudgetAdded,
+ AppBudgetConsumed: appBudgetConsumed,
},
},
- WouldSucceed: expectedSuccess,
},
}
})
@@ -635,21 +582,16 @@ int 1`,
func TestSimpleAppCall(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
-
- for _, signed := range []bool{true, false} {
- signed := signed
- t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) {
- t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
-
- // Create program and call it
- futureAppID := basics.AppIndex(1)
- createTxn := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: sender.Addr,
- ApplicationID: 0,
- ApprovalProgram: `#pragma version 6
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ // Create program and call it
+ futureAppID := basics.AppIndex(1001)
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: `#pragma version 6
txn ApplicationID
bz create
byte "app call"
@@ -661,238 +603,647 @@ log
end:
int 1
`,
- ClearStateProgram: `#pragma version 6
+ ClearStateProgram: `#pragma version 6
int 0
`,
- })
- callTxn := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: sender.Addr,
- ApplicationID: futureAppID,
- })
-
- txntest.Group(&createTxn, &callTxn)
+ })
+ callTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ })
- signedCreateTxn := createTxn.SignedTxn()
- signedCallTxn := callTxn.SignedTxn()
+ txntest.Group(&createTxn, &callTxn)
- if signed {
- signedCreateTxn = signedCreateTxn.Txn.Sign(sender.Sk)
- signedCallTxn = signedCallTxn.Txn.Sign(sender.Sk)
- }
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
+ signedCallTxn := callTxn.Txn().Sign(sender.Sk)
- return simulationTestCase{
- input: []transactions.SignedTxn{signedCreateTxn, signedCallTxn},
- expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
- TxnGroups: []simulation.TxnGroupResult{
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedCreateTxn, signedCallTxn},
+ },
+ },
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
{
- Txns: []simulation.TxnResult{
- {
- Txn: transactions.SignedTxnWithAD{
- ApplyData: transactions.ApplyData{
- ApplicationID: futureAppID,
- EvalDelta: transactions.EvalDelta{
- Logs: []string{"app creation"},
- },
- },
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ EvalDelta: transactions.EvalDelta{
+ Logs: []string{"app creation"},
},
- MissingSignature: !signed,
},
- {
- Txn: transactions.SignedTxnWithAD{
- ApplyData: transactions.ApplyData{
- EvalDelta: transactions.EvalDelta{
- Logs: []string{"app call"},
- },
- },
+ },
+ AppBudgetConsumed: 5,
+ },
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ EvalDelta: transactions.EvalDelta{
+ Logs: []string{"app call"},
},
- MissingSignature: !signed,
},
},
+ AppBudgetConsumed: 6,
},
},
- WouldSucceed: signed,
+ AppBudgetAdded: 1400,
+ AppBudgetConsumed: 11,
},
- }
- })
- })
- }
+ },
+ },
+ }
+ })
}
func TestRejectAppCall(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
-
- for _, signed := range []bool{true, false} {
- signed := signed
- t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) {
- t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
-
- futureAppID := basics.AppIndex(1)
- createTxn := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: sender.Addr,
- ApplicationID: 0,
- ApprovalProgram: `#pragma version 6
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ futureAppID := basics.AppIndex(1001)
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: `#pragma version 6
byte "app creation"
log
int 0
`,
- ClearStateProgram: `#pragma version 6
+ ClearStateProgram: `#pragma version 6
int 0
`,
- })
-
- signedCreateTxn := createTxn.SignedTxn()
-
- if signed {
- signedCreateTxn = createTxn.Txn().Sign(sender.Sk)
- }
+ })
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
- return simulationTestCase{
- input: []transactions.SignedTxn{signedCreateTxn},
- expectedError: "transaction rejected by ApprovalProgram",
- expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
- TxnGroups: []simulation.TxnGroupResult{
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{{signedCreateTxn}},
+ },
+ expectedError: "transaction rejected by ApprovalProgram",
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
{
- Txns: []simulation.TxnResult{
- {
- Txn: transactions.SignedTxnWithAD{
- ApplyData: transactions.ApplyData{
- ApplicationID: futureAppID,
- EvalDelta: transactions.EvalDelta{
- Logs: []string{"app creation"},
- },
- },
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ EvalDelta: transactions.EvalDelta{
+ Logs: []string{"app creation"},
},
- MissingSignature: !signed,
},
},
- FailedAt: simulation.TxnPath{0},
+ AppBudgetConsumed: 3,
},
},
- WouldSucceed: false,
+ FailedAt: simulation.TxnPath{0},
+ AppBudgetAdded: 700,
+ AppBudgetConsumed: 3,
},
- }
- })
- })
- }
+ },
+ },
+ }
+ })
}
func TestErrorAppCall(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
-
- for _, signed := range []bool{true, false} {
- signed := signed
- t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) {
- t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
-
- futureAppID := basics.AppIndex(1)
- createTxn := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: sender.Addr,
- ApplicationID: 0,
- ApprovalProgram: `#pragma version 6
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ futureAppID := basics.AppIndex(1001)
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: `#pragma version 6
byte "app creation"
log
err
`,
- ClearStateProgram: `#pragma version 6
+ ClearStateProgram: `#pragma version 6
int 0
`,
- })
+ })
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
- signedCreateTxn := createTxn.SignedTxn()
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{{signedCreateTxn}},
+ },
+ expectedError: "err opcode executed",
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ EvalDelta: transactions.EvalDelta{
+ Logs: []string{"app creation"},
+ },
+ },
+ },
+ AppBudgetConsumed: 3,
+ },
+ },
+ FailedAt: simulation.TxnPath{0},
+ AppBudgetAdded: 700,
+ AppBudgetConsumed: 3,
+ },
+ },
+ },
+ }
+ })
+}
- if signed {
- signedCreateTxn = createTxn.Txn().Sign(sender.Sk)
- }
+func TestAppCallOverBudget(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
- return simulationTestCase{
- input: []transactions.SignedTxn{signedCreateTxn},
- expectedError: "err opcode executed",
- expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
- TxnGroups: []simulation.TxnGroupResult{
+ // Transaction group has a cost of 4 + 1398
+ expensiveAppSource := `#pragma version 6
+ txn ApplicationID // [appId]
+ bz end // []
+` + strings.Repeat(`int 1
+ pop
+`, 697) + `end:
+ int 1`
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ futureAppID := basics.AppIndex(1001)
+ // App create with cost 4
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: expensiveAppSource,
+ ClearStateProgram: `#pragma version 6
+int 0
+`,
+ })
+ // App call with cost 1398 - will cause a budget-exceeded error,
+ // but will only report a cost of up to 1396.
+ expensiveTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ })
+
+ txntest.Group(&createTxn, &expensiveTxn)
+
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
+ signedExpensiveTxn := expensiveTxn.Txn().Sign(sender.Sk)
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedCreateTxn, signedExpensiveTxn},
+ },
+ },
+ expectedError: "dynamic cost budget exceeded",
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
{
- Txns: []simulation.TxnResult{
- {
- Txn: transactions.SignedTxnWithAD{
- ApplyData: transactions.ApplyData{
- ApplicationID: futureAppID,
- EvalDelta: transactions.EvalDelta{
- Logs: []string{"app creation"},
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ },
+ },
+ AppBudgetConsumed: 4,
+ },
+ {
+ AppBudgetConsumed: 1396,
+ },
+ },
+ FailedAt: simulation.TxnPath{1},
+ AppBudgetAdded: 1400,
+ AppBudgetConsumed: 1400,
+ },
+ },
+ },
+ }
+ })
+}
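// Sketch (illustrative, not part of this change; assumes s is a
// simulation.Simulator and group is the signed group built above): a budget
// failure still yields a populated Result with a nil error, so the partial
// costs in the expectation above are observable by callers.
result, err := s.Simulate(simulation.Request{
	TxnGroups: [][]transactions.SignedTxn{group},
})
if err == nil && result.TxnGroups[0].FailedAt != nil {
	gr := result.TxnGroups[0]
	fmt.Printf("failed at %v: %s\n", gr.FailedAt, gr.FailureMessage)
	fmt.Printf("consumed %d of %d app budget\n", gr.AppBudgetConsumed, gr.AppBudgetAdded)
}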
+
+func TestAppCallWithExtraBudget(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Transaction group has a cost of 4 + 1404
+ expensiveAppSource := `#pragma version 6
+ txn ApplicationID // [appId]
+ bz end // []
+` + strings.Repeat(`int 1; pop;`, 700) + `end:
+ int 1`
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ futureAppID := basics.AppIndex(1001)
+ // App create with cost 4
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: expensiveAppSource,
+ ClearStateProgram: "#pragma version 6\nint 0",
+ })
+ // Expensive app call: 700 repetitions of "int 1; pop", total cost 1404
+ expensiveTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ })
+
+ txntest.Group(&createTxn, &expensiveTxn)
+
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
+ signedExpensiveTxn := expensiveTxn.Txn().Sign(sender.Sk)
+ extraOpcodeBudget := uint64(100)
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedCreateTxn, signedExpensiveTxn},
+ },
+ ExtraOpcodeBudget: extraOpcodeBudget,
+ },
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ },
+ },
+ AppBudgetConsumed: 4,
+ },
+ {
+ AppBudgetConsumed: 1404,
+ },
+ },
+ AppBudgetAdded: 1500,
+ AppBudgetConsumed: 1408,
+ },
+ },
+ EvalOverrides: simulation.ResultEvalOverrides{ExtraOpcodeBudget: extraOpcodeBudget},
+ },
+ }
+ })
+}
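// Sketch (illustrative): ExtraOpcodeBudget grants simulation-only budget on
// top of the 700 per app call, matching AppBudgetAdded of 1500 above; the
// override is echoed back in Result.EvalOverrides.
result, err := s.Simulate(simulation.Request{
	TxnGroups:         [][]transactions.SignedTxn{group},
	ExtraOpcodeBudget: 100,
})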
+
+func TestAppCallWithExtraBudgetOverBudget(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Transaction group has a cost of 4 + 1404
+ expensiveAppSource := `#pragma version 6
+ txn ApplicationID // [appId]
+ bz end // []
+` + strings.Repeat(`int 1; pop;`, 700) + `end:
+ int 1`
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ futureAppID := basics.AppIndex(1001)
+ // App create with cost 4
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: expensiveAppSource,
+ ClearStateProgram: "#pragma version 6\nint 0",
+ })
+ // Expensive app call: 700 repetitions of "int 1; pop", total cost 1404
+ expensiveTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ })
+
+ txntest.Group(&createTxn, &expensiveTxn)
+
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
+ signedExpensiveTxn := expensiveTxn.Txn().Sign(sender.Sk)
+ // Add a small amount of extra budget, but not enough
+ extraBudget := uint64(5)
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedCreateTxn, signedExpensiveTxn},
+ },
+ ExtraOpcodeBudget: extraBudget,
+ },
+ expectedError: "dynamic cost budget exceeded",
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ },
+ },
+ AppBudgetConsumed: 4,
+ },
+ {
+ AppBudgetConsumed: 1401,
+ },
+ },
+ FailedAt: simulation.TxnPath{1},
+ AppBudgetAdded: 1405,
+ AppBudgetConsumed: 1405,
+ },
+ },
+ EvalOverrides: simulation.ResultEvalOverrides{ExtraOpcodeBudget: extraBudget},
+ },
+ }
+ })
+}
+
+func TestAppCallWithExtraBudgetExceedsInternalLimit(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Transaction group has a cost of 4 + 1404
+ expensiveAppSource := `#pragma version 6
+ txn ApplicationID // [appId]
+ bz end // []
+` + strings.Repeat(`int 1; pop;`, 700) + `end:
+ int 1`
+
+ env := simulationtesting.PrepareSimulatorTest(t)
+ defer env.Close()
+ s := simulation.MakeSimulator(env.Ledger)
+
+ sender := env.Accounts[0]
+
+ futureAppID := basics.AppIndex(1001)
+ // App create with cost 4
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: expensiveAppSource,
+ ClearStateProgram: "#pragma version 6\nint 0",
+ })
+ // Expensive app call: 700 repetitions of "int 1; pop", total cost 1404
+ expensiveTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ })
+
+ txntest.Group(&createTxn, &expensiveTxn)
+
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
+ signedExpensiveTxn := expensiveTxn.Txn().Sign(sender.Sk)
+
+ // Add an extra budget that exceeds simulation.MaxExtraOpcodeBudget
+ extraBudget := simulation.MaxExtraOpcodeBudget + 1
+
+ // should error when the requested extra budget is too high
+ _, err := s.Simulate(
+ simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{{signedCreateTxn, signedExpensiveTxn}},
+ ExtraOpcodeBudget: extraBudget,
+ })
+ require.ErrorAs(t, err, &simulation.InvalidRequestError{})
+ require.ErrorContains(t, err, "extra budget 320001 > simulation extra budget limit 320000")
+}
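// Sketch (illustrative; req and s as above): request-level problems -- an
// over-limit extra budget, a bad signature, more than one group -- come back
// as InvalidRequestError from Simulate itself, with no usable Result.
var badReq simulation.InvalidRequestError
if _, err := s.Simulate(req); errors.As(err, &badReq) {
	// reject the request; nothing was evaluated
}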
+
+func TestLogicSigOverBudget(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ op, err := logic.AssembleString(`#pragma version 6
+` + strings.Repeat(`byte "a"
+keccak256
+pop
+`, 200) + `int 1`)
+ require.NoError(t, err)
+ program := logic.Program(op.Program)
+ lsigAddr := basics.Address(crypto.HashObj(&program))
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ payTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: lsigAddr,
+ Amount: 1_000_000,
+ })
+ appCallTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: lsigAddr,
+ ApprovalProgram: `#pragma version 8
+byte "hello"
+log
+int 1`,
+ ClearStateProgram: `#pragma version 8
+int 1`,
+ })
+
+ txntest.Group(&payTxn, &appCallTxn)
+
+ signedPayTxn := payTxn.Txn().Sign(sender.Sk)
+ signedAppCallTxn := appCallTxn.SignedTxn()
+ signedAppCallTxn.Lsig = transactions.LogicSig{
+ Logic: program,
+ }
+
+ var expectedAppCallAD transactions.ApplyData
+ expectedFailedAt := simulation.TxnPath{1}
+
+ // Opcode cost exceeded, but report the LogicSig's cost up to the point it went over the limit.
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedPayTxn, signedAppCallTxn},
+ },
+ },
+ expectedError: "dynamic cost budget exceeded",
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {},
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: expectedAppCallAD,
+ },
+ AppBudgetConsumed: 0,
+ LogicSigBudgetConsumed: 19934,
+ },
+ },
+ FailedAt: expectedFailedAt,
+ AppBudgetAdded: 0,
+ AppBudgetConsumed: 0,
+ },
+ },
+ },
+ }
+ })
+}
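// Sketch (illustrative; result from a Simulate call as in this test): app and
// LogicSig budgets are tracked separately per transaction, so a caller can
// tell which program ran out.
for i, txn := range result.TxnGroups[0].Txns {
	fmt.Printf("txn %d: app=%d logicsig=%d\n", i, txn.AppBudgetConsumed, txn.LogicSigBudgetConsumed)
}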
+
+func TestAppAtBudget(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Transaction has a cost of 700 and invokes an inner transaction
+ exactly700AndCallInner := fmt.Sprintf(`#pragma version 6
+pushint 1
+cover 0 // This is a noop, just to fix an odd number of ops
+%s
+itxn_begin
+int appl
+itxn_field TypeEnum
+byte 0x068101
+dup
+itxn_field ClearStateProgram
+itxn_field ApprovalProgram
+itxn_submit
+`, strings.Repeat(`pushint 1
+pop
+`, 345))
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ futureAppID := basics.AppIndex(1002)
+ // fund outer app
+ fund := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: futureAppID.Address(),
+ Amount: 401_000,
+ })
+ // create app
+ appCall := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApprovalProgram: exactly700AndCallInner,
+ ClearStateProgram: `#pragma version 6
+int 1`,
+ })
+
+ txntest.Group(&fund, &appCall)
+
+ signedFundTxn := fund.Txn().Sign(sender.Sk)
+ signedAppCall := appCall.Txn().Sign(sender.Sk)
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedFundTxn, signedAppCall},
+ },
+ },
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {},
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ EvalDelta: transactions.EvalDelta{
+ InnerTxns: []transactions.SignedTxnWithAD{
+ {
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID + 1,
+ },
},
},
},
- MissingSignature: !signed,
},
},
- FailedAt: simulation.TxnPath{0},
+ AppBudgetConsumed: 701,
},
},
- WouldSucceed: false,
+ AppBudgetAdded: 1400,
+ AppBudgetConsumed: 701,
},
- }
- })
- })
- }
+ },
+ },
+ }
+ })
}
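// A reading of the numbers above (inferred from these tests, not a spec):
// every app call, including the inner one submitted via itxn_submit, adds 700
// to the pooled group budget.
const (
	outerBudget = 700 // for the top-level appl txn
	innerBudget = 700 // the inner appl adds its own share
	outerCost   = 700 // the approval program above is tuned to exactly 700
	innerCost   = 1   // 0x068101 is "#pragma version 6; pushint 1"
)
// AppBudgetAdded = outerBudget + innerBudget = 1400
// AppBudgetConsumed = outerCost + innerCost = 701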
-func TestSignatureCheck(t *testing.T) {
+// TestDefaultSignatureCheck tests signature checking when AllowEmptySignatures is NOT enabled.
+func TestDefaultSignatureCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- l, accounts, txnInfo := simulationtesting.PrepareSimulatorTest(t)
- defer l.Close()
- s := simulation.MakeSimulator(l)
- sender := accounts[0].Addr
+ env := simulationtesting.PrepareSimulatorTest(t)
+ defer env.Close()
+ s := simulation.MakeSimulator(env.Ledger)
+ sender := env.Accounts[0]
- txgroup := []transactions.SignedTxn{
- txnInfo.NewTxn(txntest.Txn{
- Type: protocol.PaymentTx,
- Sender: sender,
- Receiver: sender,
- Amount: 0,
- }).SignedTxn(),
- }
+ stxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: sender.Addr,
+ Amount: 0,
+ }).SignedTxn()
- // should catch missing signature
- result, err := s.Simulate(txgroup)
+ // a missing signature should fail in the result, not as an error from Simulate
+ result, err := s.Simulate(simulation.Request{TxnGroups: [][]transactions.SignedTxn{{stxn}}})
require.NoError(t, err)
- require.False(t, result.WouldSucceed)
require.Len(t, result.TxnGroups, 1)
require.Len(t, result.TxnGroups[0].Txns, 1)
- require.True(t, result.TxnGroups[0].Txns[0].MissingSignature)
- require.Zero(t, result.TxnGroups[0].FailureMessage)
+ require.Contains(t, result.TxnGroups[0].FailureMessage, "signedtxn has no sig")
+ require.Equal(t, result.TxnGroups[0].FailedAt, simulation.TxnPath{0})
// add signature
- signatureSecrets := accounts[0].Sk
- txgroup[0] = txgroup[0].Txn.Sign(signatureSecrets)
+ stxn = stxn.Txn.Sign(sender.Sk)
// should not error now that we have a signature
- result, err = s.Simulate(txgroup)
+ result, err = s.Simulate(simulation.Request{TxnGroups: [][]transactions.SignedTxn{{stxn}}})
require.NoError(t, err)
- require.True(t, result.WouldSucceed)
require.Len(t, result.TxnGroups, 1)
require.Len(t, result.TxnGroups[0].Txns, 1)
- require.False(t, result.TxnGroups[0].Txns[0].MissingSignature)
require.Zero(t, result.TxnGroups[0].FailureMessage)
// should error with invalid signature
- txgroup[0].Sig[0] += byte(1) // will wrap if > 255
- result, err = s.Simulate(txgroup)
- require.ErrorAs(t, err, &simulation.InvalidTxGroupError{})
+ stxn.Sig[0] += byte(1) // will wrap if > 255
+ result, err = s.Simulate(simulation.Request{TxnGroups: [][]transactions.SignedTxn{{stxn}}})
+ require.ErrorAs(t, err, &simulation.InvalidRequestError{})
require.ErrorContains(t, err, "one signature didn't pass")
}
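// Sketch (illustrative; unsigned is an unsigned transactions.SignedTxn as
// built above): by default a missing signature is reported inside the Result,
// while a present-but-invalid signature rejects the whole request.
result, err := s.Simulate(simulation.Request{TxnGroups: [][]transactions.SignedTxn{{unsigned}}})
// err == nil; result.TxnGroups[0].FailedAt equals simulation.TxnPath{0}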
@@ -901,25 +1252,227 @@ func TestSignatureCheck(t *testing.T) {
func TestInvalidTxGroup(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ receiver := env.Accounts[0].Addr
- l, accounts, txnInfo := simulationtesting.PrepareSimulatorTest(t)
- defer l.Close()
- s := simulation.MakeSimulator(l)
- receiver := accounts[0].Addr
-
- txgroup := []transactions.SignedTxn{
- txnInfo.NewTxn(txntest.Txn{
- Type: protocol.PaymentTx,
+ txn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ // sending from the incentive pool should make the group invalid
Sender: ledgertesting.PoolAddr(),
Receiver: receiver,
Amount: 0,
- }).SignedTxn(),
- }
+ }).SignedTxn()
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{{txn}},
+ },
+ expectedError: "transaction from incentive pool is invalid",
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ FailedAt: simulation.TxnPath{0},
+ Txns: []simulation.TxnResult{{}},
+ },
+ },
+ },
+ }
+ })
+}
+
+// TestLogLimitLiftingInSimulation tests that an app whose log calls exceed the limits during normal runtime
+// can get through during simulation with AllowMoreLogging enabled.
+func TestLogLimitLiftingInSimulation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ LogTimes := 40
+ LogLongLine := strings.Repeat("a", 1050)
+
+ appSourceThatLogsALot := `#pragma version 8
+txn NumAppArgs
+int 0
+==
+bnz final
+` + strings.Repeat(fmt.Sprintf(`byte "%s"
+log
+`, LogLongLine), LogTimes) + `final:
+int 1`
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+ receiver := env.Accounts[1]
+
+ futureAppID := basics.AppIndex(1001)
+
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: appSourceThatLogsALot,
+ ClearStateProgram: "#pragma version 8\nint 1",
+ })
+
+ callsABunchLogs := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ Accounts: []basics.Address{receiver.Addr},
+ ApplicationArgs: [][]byte{[]byte("first-arg")},
+ })
+
+ txntest.Group(&createTxn, &callsABunchLogs)
+
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
+ signedCallsABunchLogs := callsABunchLogs.Txn().Sign(sender.Sk)
+
+ expectedMaxLogCalls, expectedMaxLogSize := uint64(2048), uint64(65536)
+ expectedLog := make([]string, LogTimes)
+ for i := 0; i < LogTimes; i++ {
+ expectedLog[i] = LogLongLine
+ }
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedCreateTxn, signedCallsABunchLogs},
+ },
+ AllowMoreLogging: true,
+ },
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ },
+ },
+ AppBudgetConsumed: 6,
+ },
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ EvalDelta: transactions.EvalDelta{
+ Logs: expectedLog,
+ },
+ },
+ },
+ AppBudgetConsumed: 86,
+ },
+ },
+ AppBudgetAdded: 1400,
+ AppBudgetConsumed: 92,
+ },
+ },
+ EvalOverrides: simulation.ResultEvalOverrides{
+ MaxLogCalls: &expectedMaxLogCalls,
+ MaxLogSize: &expectedMaxLogSize,
+ },
+ },
+ }
+ })
+}
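// Sketch (illustrative): AllowMoreLogging lifts the log limits for the
// simulated run only; the lifted values (2048 calls / 65536 bytes here) come
// back via Result.EvalOverrides.
result, err := s.Simulate(simulation.Request{
	TxnGroups:        [][]transactions.SignedTxn{group},
	AllowMoreLogging: true,
})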
+
+func TestLogSizeExceedWithLiftInSimulation(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ LogTimes := 65
+ LogLongLine := strings.Repeat("a", 1050)
+
+ appSourceThatLogsALot := `#pragma version 8
+txn NumAppArgs
+int 0
+==
+bnz final
+` + strings.Repeat(fmt.Sprintf(`byte "%s"
+log
+`, LogLongLine), LogTimes) + `final:
+int 1`
+
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+ receiver := env.Accounts[1]
+
+ futureAppID := basics.AppIndex(1001)
+
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: 0,
+ ApprovalProgram: appSourceThatLogsALot,
+ ClearStateProgram: "#pragma version 8\nint 1",
+ })
+
+ callsABunchLogs := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ Accounts: []basics.Address{receiver.Addr},
+ ApplicationArgs: [][]byte{[]byte("first-arg")},
+ })
- // should error with invalid transaction group error
- _, err := s.Simulate(txgroup)
- require.ErrorAs(t, err, &simulation.InvalidTxGroupError{})
- require.ErrorContains(t, err, "transaction from incentive pool is invalid")
+ txntest.Group(&createTxn, &callsABunchLogs)
+
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
+ signedCallsABunchLogs := callsABunchLogs.Txn().Sign(sender.Sk)
+
+ expectedMaxLogCalls, expectedMaxLogSize := uint64(2048), uint64(65536)
+ actualLogTimes := 65536 / len(LogLongLine)
+ expectedLog := make([]string, actualLogTimes)
+ for i := 0; i < actualLogTimes; i++ {
+ expectedLog[i] = LogLongLine
+ }
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedCreateTxn, signedCallsABunchLogs},
+ },
+ AllowMoreLogging: true,
+ },
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ FailedAt: simulation.TxnPath{1},
+ Txns: []simulation.TxnResult{
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ },
+ },
+ AppBudgetConsumed: 6,
+ },
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ EvalDelta: transactions.EvalDelta{
+ Logs: expectedLog,
+ },
+ },
+ },
+ AppBudgetConsumed: 131,
+ },
+ },
+ AppBudgetAdded: 1400,
+ AppBudgetConsumed: 137,
+ },
+ },
+ EvalOverrides: simulation.ResultEvalOverrides{
+ MaxLogCalls: &expectedMaxLogCalls,
+ MaxLogSize: &expectedMaxLogSize,
+ },
+ },
+ expectedError: "logic eval error: program logs too large. 66150 bytes > 65536 bytes limit.",
+ }
+ })
}
// TestBalanceChangesWithApp sends a payment transaction to a new account and confirms its balance
@@ -927,23 +1480,18 @@ func TestInvalidTxGroup(t *testing.T) {
func TestBalanceChangesWithApp(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
-
- for _, signed := range []bool{true, false} {
- signed := signed
- t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) {
- t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
- senderBalance := sender.AcctData.MicroAlgos.Raw
- sendAmount := senderBalance - 500_000 // Leave 0.5 Algos in the sender account
- receiver := accounts[1]
- receiverBalance := receiver.AcctData.MicroAlgos.Raw
-
- futureAppID := basics.AppIndex(1)
- createTxn := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: sender.Addr,
- ApprovalProgram: `#pragma version 6
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+ senderBalance := sender.AcctData.MicroAlgos.Raw
+ sendAmount := senderBalance - 500_000 // Leave 0.5 Algos in the sender account
+ receiver := env.Accounts[1]
+ receiverBalance := receiver.AcctData.MicroAlgos.Raw
+
+ futureAppID := basics.AppIndex(1001)
+ createTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApprovalProgram: `#pragma version 6
txn ApplicationID // [appId]
bz end // []
int 1 // [1]
@@ -955,79 +1503,122 @@ assert
end:
int 1 // [1]
`,
- ClearStateProgram: `#pragma version 6
+ ClearStateProgram: `#pragma version 6
int 1`,
- })
- checkStartingBalanceTxn := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: sender.Addr,
- ApplicationID: futureAppID,
- Accounts: []basics.Address{receiver.Addr},
- ApplicationArgs: [][]byte{uint64ToBytes(receiverBalance)},
- })
- paymentTxn := txnInfo.NewTxn(txntest.Txn{
+ })
+ checkStartingBalanceTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ Accounts: []basics.Address{receiver.Addr},
+ ApplicationArgs: [][]byte{uint64ToBytes(receiverBalance)},
+ })
+ paymentTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: receiver.Addr,
+ Amount: sendAmount,
+ })
+ checkEndingBalanceTxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.ApplicationCallTx,
+ Sender: sender.Addr,
+ ApplicationID: futureAppID,
+ Accounts: []basics.Address{receiver.Addr},
+ // Receiver's balance should have increased by sendAmount
+ ApplicationArgs: [][]byte{uint64ToBytes(receiverBalance + sendAmount)},
+ })
+
+ txntest.Group(&createTxn, &checkStartingBalanceTxn, &paymentTxn, &checkEndingBalanceTxn)
+
+ signedCreateTxn := createTxn.Txn().Sign(sender.Sk)
+ signedCheckStartingBalanceTxn := checkStartingBalanceTxn.Txn().Sign(sender.Sk)
+ signedPaymentTxn := paymentTxn.Txn().Sign(sender.Sk)
+ signedCheckEndingBalanceTxn := checkEndingBalanceTxn.Txn().Sign(sender.Sk)
+
+ return simulationTestCase{
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {
+ signedCreateTxn,
+ signedCheckStartingBalanceTxn,
+ signedPaymentTxn,
+ signedCheckEndingBalanceTxn,
+ },
+ },
+ },
+ expected: simulation.Result{
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
+ TxnGroups: []simulation.TxnGroupResult{
+ {
+ Txns: []simulation.TxnResult{
+ {
+ Txn: transactions.SignedTxnWithAD{
+ ApplyData: transactions.ApplyData{
+ ApplicationID: futureAppID,
+ },
+ },
+ AppBudgetConsumed: 4,
+ },
+ {
+ AppBudgetConsumed: 10,
+ },
+ {},
+ {
+ AppBudgetConsumed: 10,
+ },
+ },
+ AppBudgetAdded: 2100,
+ AppBudgetConsumed: 24,
+ },
+ },
+ },
+ }
+ })
+}
+
+// TestOptionalSignatures tests that transactions with signatures and without signatures are both
+// properly handled when AllowEmptySignatures is enabled.
+func TestOptionalSignatures(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ for _, signed := range []bool{true, false} {
+ signed := signed
+ t.Run(fmt.Sprintf("signed=%t", signed), func(t *testing.T) {
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
+
+ txn := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
Sender: sender.Addr,
- Receiver: receiver.Addr,
- Amount: sendAmount,
- })
- checkEndingBalanceTxn := txnInfo.NewTxn(txntest.Txn{
- Type: protocol.ApplicationCallTx,
- Sender: sender.Addr,
- ApplicationID: futureAppID,
- Accounts: []basics.Address{receiver.Addr},
- // Receiver's balance should have increased by sendAmount
- ApplicationArgs: [][]byte{uint64ToBytes(receiverBalance + sendAmount)},
+ Receiver: sender.Addr,
+ Amount: 1,
})
- txntest.Group(&createTxn, &checkStartingBalanceTxn, &paymentTxn, &checkEndingBalanceTxn)
-
- signedCreateTxn := createTxn.SignedTxn()
- signedCheckStartingBalanceTxn := checkStartingBalanceTxn.SignedTxn()
- signedPaymentTxn := paymentTxn.SignedTxn()
- signedCheckEndingBalanceTxn := checkEndingBalanceTxn.SignedTxn()
-
+ var stxn transactions.SignedTxn
if signed {
- signedCreateTxn = createTxn.Txn().Sign(sender.Sk)
- signedCheckStartingBalanceTxn = checkStartingBalanceTxn.Txn().Sign(sender.Sk)
- signedPaymentTxn = paymentTxn.Txn().Sign(sender.Sk)
- signedCheckEndingBalanceTxn = checkEndingBalanceTxn.Txn().Sign(sender.Sk)
+ stxn = txn.Txn().Sign(sender.Sk)
+ } else {
+ // no signature is included
+ stxn = txn.SignedTxn()
}
return simulationTestCase{
- input: []transactions.SignedTxn{
- signedCreateTxn,
- signedCheckStartingBalanceTxn,
- signedPaymentTxn,
- signedCheckEndingBalanceTxn,
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{{stxn}},
+ AllowEmptySignatures: true,
},
expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
TxnGroups: []simulation.TxnGroupResult{
{
- Txns: []simulation.TxnResult{
- {
- Txn: transactions.SignedTxnWithAD{
- ApplyData: transactions.ApplyData{
- ApplicationID: futureAppID,
- },
- },
- MissingSignature: !signed,
- },
- {
- MissingSignature: !signed,
- },
- {
- MissingSignature: !signed,
- },
- {
- MissingSignature: !signed,
- },
- },
+ Txns: []simulation.TxnResult{{}},
},
},
- WouldSucceed: signed,
+ EvalOverrides: simulation.ResultEvalOverrides{
+ AllowEmptySignatures: true,
+ },
},
}
})
@@ -1035,14 +1626,40 @@ int 1`,
}
}
-func TestPartialMissingSignatures(t *testing.T) {
+// TestOptionalSignaturesIncorrect tests that an incorrect signature still fails when
+// AllowEmptySignatures is enabled.
+func TestOptionalSignaturesIncorrect(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
+ env := simulationtesting.PrepareSimulatorTest(t)
+ defer env.Close()
+ s := simulation.MakeSimulator(env.Ledger)
+ sender := env.Accounts[0]
+
+ stxn := env.TxnInfo.NewTxn(txntest.Txn{
+ Type: protocol.PaymentTx,
+ Sender: sender.Addr,
+ Receiver: sender.Addr,
+ Amount: 0,
+ }).Txn().Sign(sender.Sk)
+
+ // should error with invalid signature
+ stxn.Sig[0] += byte(1) // will wrap if > 255
+ _, err := s.Simulate(simulation.Request{TxnGroups: [][]transactions.SignedTxn{{stxn}}})
+ require.ErrorAs(t, err, &simulation.InvalidRequestError{})
+ require.ErrorContains(t, err, "one signature didn't pass")
+}
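// Sketch (illustrative; txn is a txntest.Txn as in the tests above): an
// unsigned transaction is built with SignedTxn(), which attaches no
// Sig/Msig/Lsig, and only passes checks when AllowEmptySignatures is set.
stxn := txn.SignedTxn()
result, err := s.Simulate(simulation.Request{
	TxnGroups:            [][]transactions.SignedTxn{{stxn}},
	AllowEmptySignatures: true,
})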
+
+// TestPartialMissingSignatures tests that a group of transactions with some signatures missing is
+// handled properly when AllowEmptySignatures is enabled.
+func TestPartialMissingSignatures(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
- txn1 := txnInfo.NewTxn(txntest.Txn{
+ txn1 := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.AssetConfigTx,
Sender: sender.Addr,
AssetParams: basics.AssetParams{
@@ -1052,7 +1669,7 @@ func TestPartialMissingSignatures(t *testing.T) {
UnitName: "A",
},
})
- txn2 := txnInfo.NewTxn(txntest.Txn{
+ txn2 := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.AssetConfigTx,
Sender: sender.Addr,
AssetParams: basics.AssetParams{
@@ -1070,61 +1687,68 @@ func TestPartialMissingSignatures(t *testing.T) {
signedTxn2 := txn2.Txn().Sign(sender.Sk)
return simulationTestCase{
- input: []transactions.SignedTxn{signedTxn1, signedTxn2},
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedTxn1, signedTxn2},
+ },
+ AllowEmptySignatures: true,
+ },
expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
TxnGroups: []simulation.TxnGroupResult{
{
Txns: []simulation.TxnResult{
{
- MissingSignature: true,
Txn: transactions.SignedTxnWithAD{
ApplyData: transactions.ApplyData{
- ConfigAsset: 1,
+ ConfigAsset: 1001,
},
},
}, {
Txn: transactions.SignedTxnWithAD{
ApplyData: transactions.ApplyData{
- ConfigAsset: 2,
+ ConfigAsset: 1002,
},
},
},
},
},
},
- WouldSucceed: false,
+ EvalOverrides: simulation.ResultEvalOverrides{
+ AllowEmptySignatures: true,
+ },
},
}
})
}
// TestPooledFeesAcrossSignedAndUnsigned tests that the simulator's transaction group checks
-// allow for pooled fees across a mix of signed and unsigned transactions.
-// Transaction 1 is a signed transaction with not enough fees paid on its own.
-// Transaction 2 is an unsigned transaction with enough fees paid to cover transaction 1.
+// allow for pooled fees across a mix of signed and unsigned transactions when AllowEmptySignatures is
+// enabled.
+//
+// Transaction 1 is a signed transaction with not enough fees paid on its own.
+// Transaction 2 is an unsigned transaction with enough fees paid to cover transaction 1.
func TestPooledFeesAcrossSignedAndUnsigned(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender1 := env.Accounts[0]
+ sender2 := env.Accounts[1]
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender1 := accounts[0]
- sender2 := accounts[1]
-
- pay1 := txnInfo.NewTxn(txntest.Txn{
+ pay1 := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
Sender: sender1.Addr,
Receiver: sender2.Addr,
Amount: 1_000_000,
- Fee: txnInfo.CurrentProtocolParams().MinTxnFee - 100,
+ Fee: env.TxnInfo.CurrentProtocolParams().MinTxnFee - 100,
})
- pay2 := txnInfo.NewTxn(txntest.Txn{
+ pay2 := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
Sender: sender2.Addr,
Receiver: sender1.Addr,
Amount: 0,
- Fee: txnInfo.CurrentProtocolParams().MinTxnFee + 100,
+ Fee: env.TxnInfo.CurrentProtocolParams().MinTxnFee + 100,
})
txntest.Group(&pay1, &pay2)
@@ -1134,20 +1758,25 @@ func TestPooledFeesAcrossSignedAndUnsigned(t *testing.T) {
signedPay2 := pay2.SignedTxn()
return simulationTestCase{
- input: []transactions.SignedTxn{signedPay1, signedPay2},
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedPay1, signedPay2},
+ },
+ AllowEmptySignatures: true,
+ },
expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
TxnGroups: []simulation.TxnGroupResult{
{
Txns: []simulation.TxnResult{
- {}, {
- MissingSignature: true,
- },
+ {}, {},
},
},
},
- WouldSucceed: false,
+ EvalOverrides: simulation.ResultEvalOverrides{
+ AllowEmptySignatures: true,
+ },
},
}
})
@@ -1201,28 +1830,31 @@ func TestAppCallInnerTxnApplyDataOnFail(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
singleInnerLogAndFail := makeProgramToCallInner(t, logAndFail)
nestedInnerLogAndFail := makeProgramToCallInner(t, singleInnerLogAndFail)
+ futureOuterAppID := basics.AppIndex(1003)
+ futureInnerAppID := futureOuterAppID + 1
+
// fund outer app
- pay1 := txnInfo.NewTxn(txntest.Txn{
+ pay1 := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
Sender: sender.Addr,
- Receiver: basics.AppIndex(3).Address(),
+ Receiver: futureOuterAppID.Address(),
Amount: 401_000, // 400_000 min balance plus 1_000 for 1 txn
})
// fund inner app
- pay2 := txnInfo.NewTxn(txntest.Txn{
+ pay2 := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
Sender: sender.Addr,
- Receiver: basics.AppIndex(4).Address(),
+ Receiver: futureInnerAppID.Address(),
Amount: 401_000, // 400_000 min balance plus 1_000 for 1 txn
})
// create app
- appCall := txnInfo.NewTxn(txntest.Txn{
+ appCall := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: sender.Addr,
ApplicationArgs: [][]byte{uint64ToBytes(uint64(1))},
@@ -1233,35 +1865,39 @@ int 1`,
txgroup := txntest.Group(&pay1, &pay2, &appCall)
+ for i := range txgroup {
+ txgroup[i] = txgroup[i].Txn.Sign(sender.Sk)
+ }
+
return simulationTestCase{
- input: txgroup,
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{txgroup},
+ },
expectedError: "rejected by ApprovalProgram",
expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
TxnGroups: []simulation.TxnGroupResult{
{
Txns: []simulation.TxnResult{
+ {},
+ {},
{
- MissingSignature: true,
- }, {
- MissingSignature: true,
- }, {
Txn: transactions.SignedTxnWithAD{
ApplyData: transactions.ApplyData{
- ApplicationID: 3,
+ ApplicationID: futureOuterAppID,
EvalDelta: transactions.EvalDelta{
Logs: []string{"starting inner txn"},
InnerTxns: []transactions.SignedTxnWithAD{
{
ApplyData: transactions.ApplyData{
- ApplicationID: 4,
+ ApplicationID: futureInnerAppID,
EvalDelta: transactions.EvalDelta{
Logs: []string{"starting inner txn"},
InnerTxns: []transactions.SignedTxnWithAD{
{
ApplyData: transactions.ApplyData{
- ApplicationID: 5,
+ ApplicationID: futureInnerAppID + 1,
EvalDelta: transactions.EvalDelta{
Logs: []string{"message"},
},
@@ -1275,13 +1911,14 @@ int 1`,
},
},
},
- MissingSignature: true,
+ AppBudgetConsumed: 27,
},
},
- FailedAt: simulation.TxnPath{2, 0, 0},
+ AppBudgetAdded: 2100,
+ AppBudgetConsumed: 27,
+ FailedAt: simulation.TxnPath{2, 0, 0},
},
},
- WouldSucceed: false,
},
}
})
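// Sketch (illustrative): FailedAt is a path, not a flat index. TxnPath{2, 0, 0}
// above reads: third top-level txn -> its first inner txn -> that inner txn's
// first inner txn.
for depth, idx := range result.TxnGroups[0].FailedAt {
	fmt.Printf("depth %d: txn index %d\n", depth, idx)
}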
@@ -1303,21 +1940,23 @@ func TestNonAppCallInnerTxnApplyDataOnFail(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
logAndFailItxnCode := makeItxnSubmitToCallInner(t, logAndFail)
approvalProgram := wrapCodeWithVersionAndReturn(createAssetCode + logAndFailItxnCode)
+ futureAppID := basics.AppIndex(1002)
+
// fund outer app
- pay1 := txnInfo.NewTxn(txntest.Txn{
+ pay1 := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
Sender: sender.Addr,
- Receiver: basics.AppIndex(2).Address(),
+ Receiver: futureAppID.Address(),
Amount: 401_000, // 400_000 min balance plus 1_000 for 1 txn
})
// create app
- appCall := txnInfo.NewTxn(txntest.Txn{
+ appCall := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: sender.Addr,
ApplicationArgs: [][]byte{uint64ToBytes(uint64(1))},
@@ -1328,32 +1967,37 @@ int 1`,
txgroup := txntest.Group(&pay1, &appCall)
+ for i := range txgroup {
+ txgroup[i] = txgroup[i].Txn.Sign(sender.Sk)
+ }
+
return simulationTestCase{
- input: txgroup,
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{txgroup},
+ },
expectedError: "rejected by ApprovalProgram",
expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
TxnGroups: []simulation.TxnGroupResult{
{
Txns: []simulation.TxnResult{
+ {},
{
- MissingSignature: true,
- }, {
Txn: transactions.SignedTxnWithAD{
ApplyData: transactions.ApplyData{
- ApplicationID: 2,
+ ApplicationID: futureAppID,
EvalDelta: transactions.EvalDelta{
Logs: []string{"starting asset create", "finished asset create", "starting inner txn"},
InnerTxns: []transactions.SignedTxnWithAD{
{
ApplyData: transactions.ApplyData{
- ConfigAsset: 3,
+ ConfigAsset: basics.AssetIndex(futureAppID) + 1,
},
},
{
ApplyData: transactions.ApplyData{
- ApplicationID: 4,
+ ApplicationID: futureAppID + 2,
EvalDelta: transactions.EvalDelta{
Logs: []string{"message"},
},
@@ -1363,13 +2007,14 @@ int 1`,
},
},
},
- MissingSignature: true,
+ AppBudgetConsumed: 23,
},
},
- FailedAt: simulation.TxnPath{1, 1},
+ AppBudgetAdded: 2100,
+ AppBudgetConsumed: 23,
+ FailedAt: simulation.TxnPath{1, 1},
},
},
- WouldSucceed: false,
},
}
})
@@ -1392,22 +2037,24 @@ log
func TestInnerTxnNonAppCallFailure(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
+ futureAppID := basics.AppIndex(1002)
+ futureAssetID := basics.AssetIndex(1003)
// configAssetCode should fail because createAssetCode does not set an asset manager
- approvalProgram := wrapCodeWithVersionAndReturn(createAssetCode + fmt.Sprintf(configAssetCode, 3))
+ approvalProgram := wrapCodeWithVersionAndReturn(createAssetCode + fmt.Sprintf(configAssetCode, futureAssetID))
// fund outer app
- pay1 := txnInfo.NewTxn(txntest.Txn{
+ pay1 := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
Sender: sender.Addr,
- Receiver: basics.AppIndex(2).Address(),
+ Receiver: futureAppID.Address(),
Amount: 402_000, // 400_000 min balance plus 2_000 for 2 inners
})
// create app
- appCall := txnInfo.NewTxn(txntest.Txn{
+ appCall := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: sender.Addr,
ApplicationArgs: [][]byte{uint64ToBytes(uint64(1))},
@@ -1418,27 +2065,32 @@ int 1`,
txgroup := txntest.Group(&pay1, &appCall)
+ for i := range txgroup {
+ txgroup[i] = txgroup[i].Txn.Sign(sender.Sk)
+ }
+
return simulationTestCase{
- input: txgroup,
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{txgroup},
+ },
expectedError: "logic eval error: this transaction should be issued by the manager",
expected: simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
TxnGroups: []simulation.TxnGroupResult{
{
Txns: []simulation.TxnResult{
+ {},
{
- MissingSignature: true,
- }, {
Txn: transactions.SignedTxnWithAD{
ApplyData: transactions.ApplyData{
- ApplicationID: 2,
+ ApplicationID: futureAppID,
EvalDelta: transactions.EvalDelta{
Logs: []string{"starting asset create", "finished asset create", "starting asset config"},
InnerTxns: []transactions.SignedTxnWithAD{
{
ApplyData: transactions.ApplyData{
- ConfigAsset: 3,
+ ConfigAsset: futureAssetID,
},
},
{},
@@ -1446,13 +2098,14 @@ int 1`,
},
},
},
- MissingSignature: true,
+ AppBudgetConsumed: 17,
},
},
- FailedAt: simulation.TxnPath{1, 1},
+ AppBudgetAdded: 2100,
+ AppBudgetConsumed: 17,
+ FailedAt: simulation.TxnPath{1, 1},
},
},
- WouldSucceed: false,
},
}
})
@@ -1468,17 +2121,17 @@ func TestMockTracerScenarios(t *testing.T) {
scenarioFn := scenarioFn
t.Run(name, func(t *testing.T) {
t.Parallel()
- simulationTest(t, func(accounts []simulationtesting.Account, txnInfo simulationtesting.TxnInfo) simulationTestCase {
- sender := accounts[0]
+ simulationTest(t, func(env simulationtesting.Environment) simulationTestCase {
+ sender := env.Accounts[0]
- futureAppID := basics.AppIndex(2)
- payTxn := txnInfo.NewTxn(txntest.Txn{
+ futureAppID := basics.AppIndex(1002)
+ payTxn := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
Sender: sender.Addr,
Receiver: futureAppID.Address(),
Amount: 2_000_000,
})
- appCallTxn := txnInfo.NewTxn(txntest.Txn{
+ appCallTxn := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: sender.Addr,
ClearStateProgram: `#pragma version 6
@@ -1486,7 +2139,7 @@ func TestMockTracerScenarios(t *testing.T) {
})
scenario := scenarioFn(mocktracer.TestScenarioInfo{
CallingTxn: appCallTxn.Txn(),
- MinFee: basics.MicroAlgos{Raw: txnInfo.CurrentProtocolParams().MinTxnFee},
+ MinFee: basics.MicroAlgos{Raw: env.TxnInfo.CurrentProtocolParams().MinTxnFee},
CreatedAppID: futureAppID,
})
appCallTxn.ApprovalProgram = scenario.Program
@@ -1502,26 +2155,34 @@ func TestMockTracerScenarios(t *testing.T) {
expectedFailedAt[0]++
}
expected := simulation.Result{
- Version: 1,
- LastRound: txnInfo.LatestRound(),
+ Version: simulation.ResultLatestVersion,
+ LastRound: env.TxnInfo.LatestRound(),
TxnGroups: []simulation.TxnGroupResult{
{
- FailedAt: expectedFailedAt,
+ AppBudgetAdded: scenario.AppBudgetAdded,
+ AppBudgetConsumed: scenario.AppBudgetConsumed,
+ FailedAt: expectedFailedAt,
Txns: []simulation.TxnResult{
- {},
+ {
+ AppBudgetConsumed: scenario.TxnAppBudgetConsumed[0],
+ },
{
Txn: transactions.SignedTxnWithAD{
ApplyData: scenario.ExpectedSimulationAD,
},
+ AppBudgetConsumed: scenario.TxnAppBudgetConsumed[1],
},
},
},
},
- WouldSucceed: scenario.Outcome == mocktracer.ApprovalOutcome,
}
return simulationTestCase{
- input: []transactions.SignedTxn{signedPayTxn, signedAppCallTxn},
+ input: simulation.Request{
+ TxnGroups: [][]transactions.SignedTxn{
+ {signedPayTxn, signedAppCallTxn},
+ },
+ },
expectedError: scenario.ExpectedError,
expected: expected,
}
diff --git a/ledger/simulation/simulator.go b/ledger/simulation/simulator.go
index 5ae93b019..1bcd3a5ba 100644
--- a/ledger/simulation/simulator.go
+++ b/ledger/simulation/simulator.go
@@ -18,6 +18,7 @@ package simulation
import (
"errors"
+ "fmt"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data"
@@ -36,6 +37,14 @@ type simulatorLedger struct {
start basics.Round
}
+// Request packs the transaction group(s) to simulate, along with configuration options that override normal transaction evaluation rules.
+type Request struct {
+ TxnGroups [][]transactions.SignedTxn
+ AllowEmptySignatures bool
+ AllowMoreLogging bool
+ ExtraOpcodeBudget uint64
+}
+
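// An illustrative, fully-populated Request as a caller would build it (field
// semantics per the tests in this change; group is assumed, and exactly one
// transaction group is accepted per request today):
//
//	req := Request{
//		TxnGroups:            [][]transactions.SignedTxn{group},
//		AllowEmptySignatures: true, // unsigned txns pass signature checks
//		AllowMoreLogging:     true, // lift log count/size limits for this run
//		ExtraOpcodeBudget:    1000, // extra app budget, capped at MaxExtraOpcodeBudget
//	}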
// Latest is part of the LedgerForSimulator interface.
// We override this to use the set latest to prevent racing with the network
func (l simulatorLedger) Latest() basics.Round {
@@ -64,8 +73,8 @@ func (s SimulatorError) Unwrap() error {
return s.err
}
-// InvalidTxGroupError occurs when an invalid transaction group was submitted to the simulator.
-type InvalidTxGroupError struct {
+// InvalidRequestError occurs when an invalid request was submitted to the simulator.
+type InvalidRequestError struct {
SimulatorError
}
@@ -104,30 +113,27 @@ var proxySigner = crypto.PrivateKey{
// check verifies that the transaction is well-formed and has valid or missing signatures.
// An invalid transaction group error is returned if the transaction is not well-formed or there are invalid signatures.
// To make things easier, we support submitting unsigned transactions and will respond whether signatures are missing.
-func (s Simulator) check(hdr bookkeeping.BlockHeader, txgroup []transactions.SignedTxn, debugger logic.EvalTracer) ([]int, error) {
+func (s Simulator) check(hdr bookkeeping.BlockHeader, txgroup []transactions.SignedTxn, tracer logic.EvalTracer, overrides ResultEvalOverrides) error {
proxySignerSecrets, err := crypto.SecretKeyToSignatureSecrets(proxySigner)
if err != nil {
- return nil, err
+ return err
}
- // Find and prep any transactions that are missing signatures. We will modify a copy of these
- // transactions to pass signature verification. The modifications will not affect the input
- // txgroup slice.
+ // If AllowEmptySignatures is enabled, find and prep any transactions that are missing signatures.
+ // We will modify a copy of these transactions to pass signature verification. The modifications
+ // will not affect the input txgroup slice.
//
// Note: currently we only support missing transaction signatures, but it should be possible to
// support unsigned delegated LogicSigs as well. A single-signature unsigned delegated LogicSig
// is indistinguishable from an escrow LogicSig, so we would need to decide on another way of
// denoting that a LogicSig's delegation signature is omitted, e.g. by setting all the bits of
// the signature.
- missingSigs := make([]int, 0, len(txgroup))
txnsToVerify := make([]transactions.SignedTxn, len(txgroup))
for i, stxn := range txgroup {
if stxn.Txn.Type == protocol.StateProofTx {
- return nil, errors.New("cannot simulate StateProof transactions")
+ return errors.New("cannot simulate StateProof transactions")
}
- if txnHasNoSignature(stxn) {
- missingSigs = append(missingSigs, i)
-
+ if overrides.AllowEmptySignatures && txnHasNoSignature(stxn) {
// Replace the signed txn with one signed by the proxySigner. At evaluation this would
// raise an error, since the proxySigner's public key likely does not have authority
// over the sender's account. However, this will pass validation, since the signature
@@ -139,21 +145,20 @@ func (s Simulator) check(hdr bookkeeping.BlockHeader, txgroup []transactions.Sig
}
// Verify the signed transactions are well-formed and have valid signatures
- _, err = verify.TxnGroupWithTracer(txnsToVerify, &hdr, nil, s.ledger, debugger)
+ _, err = verify.TxnGroupWithTracer(txnsToVerify, &hdr, nil, s.ledger, tracer)
if err != nil {
- err = InvalidTxGroupError{SimulatorError{err}}
+ err = InvalidRequestError{SimulatorError{err}}
}
- return missingSigs, err
+ return err
}
func (s Simulator) evaluate(hdr bookkeeping.BlockHeader, stxns []transactions.SignedTxn, tracer logic.EvalTracer) (*ledgercore.ValidatedBlock, error) {
// s.ledger has 'StartEvaluator' because *data.Ledger is embedded in the simulatorLedger
// and data.Ledger embeds *ledger.Ledger
- eval, err := s.ledger.StartEvaluator(hdr, len(stxns), 0)
+ eval, err := s.ledger.StartEvaluator(hdr, len(stxns), 0, tracer)
if err != nil {
return nil, err
}
- eval.Tracer = tracer
group := transactions.WrapSignedTxnsWithAD(stxns)
@@ -171,36 +176,59 @@ func (s Simulator) evaluate(hdr bookkeeping.BlockHeader, stxns []transactions.Si
return vb, nil
}
-func (s Simulator) simulateWithTracer(txgroup []transactions.SignedTxn, tracer logic.EvalTracer) (*ledgercore.ValidatedBlock, []int, error) {
+func (s Simulator) simulateWithTracer(txgroup []transactions.SignedTxn, tracer logic.EvalTracer, overrides ResultEvalOverrides) (*ledgercore.ValidatedBlock, error) {
prevBlockHdr, err := s.ledger.BlockHdr(s.ledger.start)
if err != nil {
- return nil, nil, err
+ return nil, err
}
nextBlock := bookkeeping.MakeBlock(prevBlockHdr)
hdr := nextBlock.BlockHeader
// check that the transaction is well-formed and mark whether signatures are missing
- missingSignatures, err := s.check(hdr, txgroup, tracer)
+ err = s.check(hdr, txgroup, tracer, overrides)
if err != nil {
- return nil, missingSignatures, err
+ return nil, err
+ }
+
+ // check that the extra budget does not exceed the simulation extra budget limit
+ if overrides.ExtraOpcodeBudget > MaxExtraOpcodeBudget {
+ return nil,
+ InvalidRequestError{
+ SimulatorError{
+ fmt.Errorf(
+ "extra budget %d > simulation extra budget limit %d",
+ overrides.ExtraOpcodeBudget, MaxExtraOpcodeBudget),
+ },
+ }
}
vb, err := s.evaluate(hdr, txgroup, tracer)
- return vb, missingSignatures, err
+ return vb, err
}
// Simulate simulates a transaction group using the simulator. Will error if the transaction group is not well-formed.
-func (s Simulator) Simulate(txgroup []transactions.SignedTxn) (Result, error) {
- simulatorTracer := makeEvalTracer(s.ledger.start, txgroup)
- block, missingSigIndexes, err := s.simulateWithTracer(txgroup, simulatorTracer)
- if err != nil {
- simulatorTracer.result.WouldSucceed = false
+func (s Simulator) Simulate(simulateRequest Request) (Result, error) {
+ simulatorTracer := makeEvalTracer(s.ledger.start, simulateRequest)
+
+ if len(simulateRequest.TxnGroups) != 1 {
+ return Result{}, InvalidRequestError{
+ SimulatorError{
+ err: fmt.Errorf("expected 1 transaction group, got %d", len(simulateRequest.TxnGroups)),
+ },
+ }
+ }
- var lsigError verify.LogicSigError
+ block, err := s.simulateWithTracer(simulateRequest.TxnGroups[0], simulatorTracer, simulatorTracer.result.EvalOverrides)
+ if err != nil {
+ var verifyError *verify.TxGroupError
switch {
- case errors.As(err, &lsigError):
- simulatorTracer.result.TxnGroups[0].FailureMessage = lsigError.Error()
- simulatorTracer.result.TxnGroups[0].FailedAt = TxnPath{uint64(lsigError.GroupIndex)}
+ case errors.As(err, &verifyError):
+ if verifyError.GroupIndex < 0 {
+ // This group failed verification, but the problem can't be blamed on a single transaction.
+ return Result{}, InvalidRequestError{SimulatorError{err}}
+ }
+ simulatorTracer.result.TxnGroups[0].FailureMessage = verifyError.Error()
+ simulatorTracer.result.TxnGroups[0].FailedAt = TxnPath{uint64(verifyError.GroupIndex)}
case errors.As(err, &EvalFailureError{}):
simulatorTracer.result.TxnGroups[0].FailureMessage = err.Error()
simulatorTracer.result.TxnGroups[0].FailedAt = simulatorTracer.failedAt
@@ -212,11 +240,12 @@ func (s Simulator) Simulate(txgroup []transactions.SignedTxn) (Result, error) {
simulatorTracer.result.Block = block
- // mark whether signatures are missing
- for _, index := range missingSigIndexes {
- simulatorTracer.result.TxnGroups[0].Txns[index].MissingSignature = true
- simulatorTracer.result.WouldSucceed = false
+ // Update total cost by aggregating individual txn costs
+ totalCost := uint64(0)
+ for _, txn := range simulatorTracer.result.TxnGroups[0].Txns {
+ totalCost += txn.AppBudgetConsumed
}
+ simulatorTracer.result.TxnGroups[0].AppBudgetConsumed = totalCost
return *simulatorTracer.result, nil
}
diff --git a/ledger/simulation/simulator_test.go b/ledger/simulation/simulator_test.go
index 856fabe25..90eada3f4 100644
--- a/ledger/simulation/simulator_test.go
+++ b/ledger/simulation/simulator_test.go
@@ -22,11 +22,13 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/logic/mocktracer"
"github.com/algorand/go-algorand/data/txntest"
- "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/eval"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
simulationtesting "github.com/algorand/go-algorand/ledger/simulation/testing"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -40,7 +42,7 @@ func TestNonOverridenDataLedgerMethodsUseRoundParameter(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- l, _, _ := simulationtesting.PrepareSimulatorTest(t)
+ env := simulationtesting.PrepareSimulatorTest(t)
// methods overridden by `simulatorLedger`
overridenMethods := []string{
@@ -71,7 +73,7 @@ func TestNonOverridenDataLedgerMethodsUseRoundParameter(t *testing.T) {
}
methodExistsInEvalLedger := func(methodName string) bool {
- evalLedgerType := reflect.TypeOf((*internal.LedgerForEvaluator)(nil)).Elem()
+ evalLedgerType := reflect.TypeOf((*eval.LedgerForEvaluator)(nil)).Elem()
for i := 0; i < evalLedgerType.NumMethod(); i++ {
if evalLedgerType.Method(i).Name == methodName {
return true
@@ -89,7 +91,7 @@ func TestNonOverridenDataLedgerMethodsUseRoundParameter(t *testing.T) {
return false
}
- ledgerType := reflect.TypeOf(l)
+ ledgerType := reflect.TypeOf(env.Ledger)
for i := 0; i < ledgerType.NumMethod(); i++ {
method := ledgerType.Method(i)
if methodExistsInEvalLedger(method.Name) && !methodIsSkipped(method.Name) {
@@ -104,10 +106,10 @@ func TestSimulateWithTrace(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- l, accounts, txnInfo := simulationtesting.PrepareSimulatorTest(t)
- defer l.Close()
- s := MakeSimulator(l)
- sender := accounts[0]
+ env := simulationtesting.PrepareSimulatorTest(t)
+ defer env.Close()
+ s := MakeSimulator(env.Ledger)
+ sender := env.Accounts[0]
op, err := logic.AssembleString(`#pragma version 8
int 1`)
@@ -115,13 +117,13 @@ int 1`)
program := logic.Program(op.Program)
lsigAddr := basics.Address(crypto.HashObj(&program))
- payTxn := txnInfo.NewTxn(txntest.Txn{
+ payTxn := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.PaymentTx,
Sender: sender.Addr,
Receiver: lsigAddr,
Amount: 1_000_000,
})
- appCallTxn := txnInfo.NewTxn(txntest.Txn{
+ appCallTxn := env.TxnInfo.NewTxn(txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: lsigAddr,
ApprovalProgram: `#pragma version 8
@@ -139,11 +141,80 @@ int 1`,
txgroup := []transactions.SignedTxn{signedPayTxn, signedAppCallTxn}
mockTracer := &mocktracer.Tracer{}
- block, _, err := s.simulateWithTracer(txgroup, mockTracer)
+ block, err := s.simulateWithTracer(txgroup, mockTracer, ResultEvalOverrides{})
require.NoError(t, err)
- payset := block.Block().Payset
- require.Len(t, payset, 2)
+ evalBlock := block.Block()
+ require.Len(t, evalBlock.Payset, 2)
+
+ expectedSenderData := ledgercore.ToAccountData(sender.AcctData)
+ expectedSenderData.MicroAlgos.Raw -= signedPayTxn.Txn.Amount.Raw + signedPayTxn.Txn.Fee.Raw
+ expectedLsigData := ledgercore.AccountData{}
+ expectedLsigData.MicroAlgos.Raw += signedPayTxn.Txn.Amount.Raw - signedAppCallTxn.Txn.Fee.Raw
+ expectedLsigData.TotalAppParams = 1
+ expectedFeeSinkData := ledgercore.ToAccountData(env.FeeSinkAccount.AcctData)
+ expectedFeeSinkData.MicroAlgos.Raw += signedPayTxn.Txn.Fee.Raw + signedAppCallTxn.Txn.Fee.Raw
+
+ expectedAppID := evalBlock.Payset[1].ApplyData.ApplicationID
+ expectedAppParams := ledgercore.AppParamsDelta{
+ Params: &basics.AppParams{
+ ApprovalProgram: signedAppCallTxn.Txn.ApprovalProgram,
+ ClearStateProgram: signedAppCallTxn.Txn.ClearStateProgram,
+ },
+ }
+
+ // Cannot use evalBlock directly because the tracer is called before many block details are finalized
+ expectedBlockHeader := bookkeeping.MakeBlock(env.TxnInfo.LatestHeader).BlockHeader
+ expectedBlockHeader.TimeStamp = evalBlock.TimeStamp
+ expectedBlockHeader.RewardsLevel = evalBlock.RewardsLevel
+ expectedBlockHeader.RewardsResidue = evalBlock.RewardsResidue
+ expectedBlockHeader.RewardsRate = evalBlock.RewardsRate
+ expectedBlockHeader.RewardsRecalculationRound = evalBlock.RewardsRecalculationRound
+
+ expectedDelta := ledgercore.StateDelta{
+ Accts: ledgercore.AccountDeltas{
+ Accts: []ledgercore.BalanceRecord{
+ {
+ Addr: sender.Addr,
+ AccountData: expectedSenderData,
+ },
+ {
+ Addr: env.FeeSinkAccount.Addr,
+ AccountData: expectedFeeSinkData,
+ },
+ {
+ Addr: lsigAddr,
+ AccountData: expectedLsigData,
+ },
+ },
+ AppResources: []ledgercore.AppResourceRecord{
+ {
+ Aidx: expectedAppID,
+ Addr: lsigAddr,
+ Params: expectedAppParams,
+ },
+ },
+ },
+ Creatables: map[basics.CreatableIndex]ledgercore.ModifiedCreatable{
+ basics.CreatableIndex(expectedAppID): {
+ Ctype: basics.AppCreatable,
+ Created: true,
+ Creator: lsigAddr,
+ },
+ },
+ Txids: map[transactions.Txid]ledgercore.IncludedTransactions{
+ signedPayTxn.Txn.ID(): {
+ LastValid: signedPayTxn.Txn.LastValid,
+ Intra: 0,
+ },
+ signedAppCallTxn.Txn.ID(): {
+ LastValid: signedAppCallTxn.Txn.LastValid,
+ Intra: 1,
+ },
+ },
+ Hdr: &expectedBlockHeader,
+ PrevTimestamp: env.TxnInfo.LatestHeader.TimeStamp,
+ }
expectedEvents := []mocktracer.Event{
// LogicSig evaluation
@@ -152,16 +223,19 @@ int 1`,
mocktracer.AfterOpcode(false),
mocktracer.AfterProgram(logic.ModeSig, false),
// Txn evaluation
+ mocktracer.BeforeBlock(block.Block().Round()),
mocktracer.BeforeTxnGroup(2),
mocktracer.BeforeTxn(protocol.PaymentTx),
- mocktracer.AfterTxn(protocol.PaymentTx, payset[0].ApplyData, false),
+ mocktracer.AfterTxn(protocol.PaymentTx, evalBlock.Payset[0].ApplyData, false),
mocktracer.BeforeTxn(protocol.ApplicationCallTx),
mocktracer.BeforeProgram(logic.ModeApp),
mocktracer.BeforeOpcode(),
mocktracer.AfterOpcode(false),
mocktracer.AfterProgram(logic.ModeApp, false),
- mocktracer.AfterTxn(protocol.ApplicationCallTx, payset[1].ApplyData, false),
- mocktracer.AfterTxnGroup(2, false),
+ mocktracer.AfterTxn(protocol.ApplicationCallTx, evalBlock.Payset[1].ApplyData, false),
+ mocktracer.AfterTxnGroup(2, &expectedDelta, false),
+ // Block evaluation
+ mocktracer.AfterBlock(block.Block().Round()),
}
- require.Equal(t, expectedEvents, mockTracer.Events)
+ mocktracer.AssertEventsEqual(t, expectedEvents, mockTracer.Events)
}
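
(The expected StateDelta built above is compared with mocktracer.AssertEventsEqual rather than require.Equal, since the tracer observes deltas before the block is finalized.)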
diff --git a/ledger/simulation/testing/utils.go b/ledger/simulation/testing/utils.go
index a18a791e1..0ff0d529f 100644
--- a/ledger/simulation/testing/utils.go
+++ b/ledger/simulation/testing/utils.go
@@ -76,9 +76,25 @@ func (info TxnInfo) InnerTxn(parent transactions.SignedTxn, inner txntest.Txn) t
return inner
}
-// PrepareSimulatorTest creates an environment to test transaction simulations
-func PrepareSimulatorTest(t *testing.T) (l *data.Ledger, accounts []Account, txnInfo TxnInfo) {
- genesisInitState, keys := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+// Environment contains the ledger and testing environment for transaction simulations
+type Environment struct {
+ Ledger *data.Ledger
+ // Accounts is a list of all accounts in the ledger, excluding the fee sink and rewards pool
+ Accounts []Account
+ FeeSinkAccount Account
+ RewardsPoolAccount Account
+ TxnInfo TxnInfo
+}
+
+// Close reclaims resources used by the testing environment
+func (env *Environment) Close() {
+ env.Ledger.Close()
+}
+
+// PrepareSimulatorTest creates an environment to test transaction simulations. The caller is
+// responsible for calling Close() on the returned environment.
+func PrepareSimulatorTest(t *testing.T) Environment {
+ genesisInitState, keys := ledgertesting.GenerateInitState(t, protocol.ConsensusFuture, 100)
// Prepare ledger
const inMem = true
@@ -89,28 +105,33 @@ func PrepareSimulatorTest(t *testing.T) (l *data.Ledger, accounts []Account, txn
realLedger, err := ledger.OpenLedger(log, t.Name(), inMem, genesisInitState, cfg)
require.NoError(t, err, "could not open ledger")
- l = &data.Ledger{Ledger: realLedger}
- require.NotNil(t, l)
+ ledger := &data.Ledger{Ledger: realLedger}
// Reformat accounts
- accounts = make([]Account, len(keys)-2) // -2 for pool and sink accounts
- i := 0
+ accounts := make([]Account, 0, len(keys)-2) // -2 for pool and sink accounts
+ var feeSinkAccount Account
+ var rewardsPoolAccount Account
for addr, key := range keys {
- if addr == ledgertesting.PoolAddr() || addr == ledgertesting.SinkAddr() {
- continue
- }
-
- acctData := genesisInitState.Accounts[addr]
- accounts[i] = Account{
+ account := Account{
Addr: addr,
Sk: key,
- AcctData: acctData,
+ AcctData: genesisInitState.Accounts[addr],
}
- i++
+
+ if addr == ledgertesting.SinkAddr() {
+ feeSinkAccount = account
+ continue
+ }
+ if addr == ledgertesting.PoolAddr() {
+ rewardsPoolAccount = account
+ continue
+ }
+
+ accounts = append(accounts, account)
}
- latest := l.Latest()
- latestHeader, err := l.BlockHdr(latest)
+ latest := ledger.Latest()
+ latestHeader, err := ledger.BlockHdr(latest)
require.NoError(t, err)
rand.Seed(time.Now().UnixNano())
@@ -119,17 +140,22 @@ func PrepareSimulatorTest(t *testing.T) (l *data.Ledger, accounts []Account, txn
numBlocks := rand.Intn(4)
for i := 0; i < numBlocks; i++ {
nextBlock := bookkeeping.MakeBlock(latestHeader)
- err = l.AddBlock(nextBlock, agreement.Certificate{})
+ nextBlock.TxnCounter = latestHeader.TxnCounter
+ err = ledger.AddBlock(nextBlock, agreement.Certificate{})
require.NoError(t, err)
// round has advanced by 1
- require.Equal(t, latest+1, l.Latest())
+ require.Equal(t, latest+1, ledger.Latest())
latest++
latestHeader = nextBlock.BlockHeader
}
- txnInfo = TxnInfo{latestHeader}
-
- return
+ return Environment{
+ Ledger: ledger,
+ Accounts: accounts,
+ FeeSinkAccount: feeSinkAccount,
+ RewardsPoolAccount: rewardsPoolAccount,
+ TxnInfo: TxnInfo{latestHeader},
+ }
}
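
The refactor above replaces a three-value return with an Environment that owns its resources, so call sites shrink to a constructor plus a deferred Close. A usage sketch under that API (the test body is illustrative):

    func TestUsingEnvironment(t *testing.T) {
        env := simulationtesting.PrepareSimulatorTest(t)
        defer env.Close() // the caller owns the ledger's lifetime

        s := MakeSimulator(env.Ledger)
        sender := env.Accounts[0] // fee sink and rewards pool are exposed separately
        _, _ = s, sender
    }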
diff --git a/ledger/simulation/trace.go b/ledger/simulation/trace.go
index d9c2e5f6a..1c112928c 100644
--- a/ledger/simulation/trace.go
+++ b/ledger/simulation/trace.go
@@ -19,8 +19,10 @@ package simulation
import (
"fmt"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger/ledgercore"
)
@@ -30,17 +32,23 @@ type TxnPath []uint64
// TxnResult contains the simulation result for a single transaction
type TxnResult struct {
- Txn transactions.SignedTxnWithAD
- MissingSignature bool
+ Txn transactions.SignedTxnWithAD
+ AppBudgetConsumed uint64
+ LogicSigBudgetConsumed uint64
}
// TxnGroupResult contains the simulation result for a single transaction group
type TxnGroupResult struct {
- Txns []TxnResult
+ Txns []TxnResult
+ // FailureMessage will be the error message for the first transaction in the group that fails.
+ // If the group succeeds, this will be empty.
FailureMessage string
-
// FailedAt is the path to the txn that failed inside of this group
FailedAt TxnPath
+ // AppBudgetAdded is the total opcode budget for this group
+ AppBudgetAdded uint64
+ // AppBudgetConsumed is the total opcode cost used for this group
+ AppBudgetConsumed uint64
}
func makeTxnGroupResult(txgroup []transactions.SignedTxn) TxnGroupResult {
@@ -54,38 +62,82 @@ func makeTxnGroupResult(txgroup []transactions.SignedTxn) TxnGroupResult {
}
// ResultLatestVersion is the latest version of the Result struct
-const ResultLatestVersion = uint64(1)
+const ResultLatestVersion = uint64(2)
+
+// ResultEvalOverrides contains the limits and parameters during a call to Simulator.Simulate
+type ResultEvalOverrides struct {
+ AllowEmptySignatures bool
+ MaxLogCalls *uint64
+ MaxLogSize *uint64
+ ExtraOpcodeBudget uint64
+}
+
+// LogBytesLimit is the hardcoded limit on how many bytes one transaction can log during simulation (with AllowMoreLogging)
+const LogBytesLimit = uint64(65536)
+
+// MaxExtraOpcodeBudget is the hardcoded limit on how much extra opcode budget can be added to one transaction group (group-size * logic-sig-budget)
+const MaxExtraOpcodeBudget = uint64(20000 * 16)
+
+// AllowMoreLogging lifts the log limits when requested:
+// - if allow is true, MaxLogCalls is taken from the local Config and MaxLogSize from LogBytesLimit
+// - otherwise, both fields remain nil
+func (eo ResultEvalOverrides) AllowMoreLogging(allow bool) ResultEvalOverrides {
+ if allow {
+ maxLogCalls, maxLogSize := uint64(config.MaxLogCalls), LogBytesLimit
+ eo.MaxLogCalls = &maxLogCalls
+ eo.MaxLogSize = &maxLogSize
+ }
+ return eo
+}
+
+// LogicEvalConstants derives logic.EvalConstants from Result.EvalOverrides (*ResultEvalOverrides),
+// yielding the parameters to override during simulation runtime.
+func (eo ResultEvalOverrides) LogicEvalConstants() logic.EvalConstants {
+ logicEvalConstants := logic.RuntimeEvalConstants()
+ if eo.MaxLogSize != nil {
+ logicEvalConstants.MaxLogSize = *eo.MaxLogSize
+ }
+ if eo.MaxLogCalls != nil {
+ logicEvalConstants.MaxLogCalls = *eo.MaxLogCalls
+ }
+ return logicEvalConstants
+}
// Result contains the result from a call to Simulator.Simulate
type Result struct {
- Version uint64
- LastRound basics.Round
- TxnGroups []TxnGroupResult // this is a list so that supporting multiple in the future is not breaking
- WouldSucceed bool // true iff no failure message, no missing signatures, and the budget was not exceeded
- Block *ledgercore.ValidatedBlock
+ Version uint64
+ LastRound basics.Round
+ TxnGroups []TxnGroupResult // this is a list so that supporting multiple in the future is not breaking
+ EvalOverrides ResultEvalOverrides
+ Block *ledgercore.ValidatedBlock
}
-func makeSimulationResultWithVersion(lastRound basics.Round, txgroups [][]transactions.SignedTxn, version uint64) (Result, error) {
+func makeSimulationResultWithVersion(lastRound basics.Round, request Request, version uint64) (Result, error) {
if version != ResultLatestVersion {
return Result{}, fmt.Errorf("invalid SimulationResult version: %d", version)
}
- groups := make([]TxnGroupResult, len(txgroups))
+ groups := make([]TxnGroupResult, len(request.TxnGroups))
- for i, txgroup := range txgroups {
+ for i, txgroup := range request.TxnGroups {
groups[i] = makeTxnGroupResult(txgroup)
}
+ resultEvalConstants := ResultEvalOverrides{
+ AllowEmptySignatures: request.AllowEmptySignatures,
+ ExtraOpcodeBudget: request.ExtraOpcodeBudget,
+ }.AllowMoreLogging(request.AllowMoreLogging)
+
return Result{
- Version: version,
- LastRound: lastRound,
- TxnGroups: groups,
- WouldSucceed: true,
+ Version: version,
+ LastRound: lastRound,
+ TxnGroups: groups,
+ EvalOverrides: resultEvalConstants,
}, nil
}
-func makeSimulationResult(lastRound basics.Round, txgroups [][]transactions.SignedTxn) Result {
- result, err := makeSimulationResultWithVersion(lastRound, txgroups, ResultLatestVersion)
+func makeSimulationResult(lastRound basics.Round, request Request) Result {
+ result, err := makeSimulationResultWithVersion(lastRound, request, ResultLatestVersion)
if err != nil {
// this should never happen, since we pass in ResultLatestVersion
panic(err)
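
To make the limits above concrete: MaxExtraOpcodeBudget is 20000 * 16 = 320,000 (one logic-sig budget per transaction across a maximal 16-transaction group), and AllowMoreLogging lifts MaxLogSize to the 65,536-byte LogBytesLimit. A sketch of building overrides from a request, using only the fields shown above (the literal values are illustrative):

    eo := ResultEvalOverrides{
        AllowEmptySignatures: true,
        ExtraOpcodeBudget:    50_000, // must not exceed MaxExtraOpcodeBudget (320,000)
    }.AllowMoreLogging(true)

    constants := eo.LogicEvalConstants()
    // constants.MaxLogSize is now 65536 and constants.MaxLogCalls is config.MaxLogCalls;
    // with no overrides set, both fall back to logic.RuntimeEvalConstants().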
diff --git a/ledger/simulation/tracer.go b/ledger/simulation/tracer.go
index 7f5840753..2b30a2e7f 100644
--- a/ledger/simulation/tracer.go
+++ b/ledger/simulation/tracer.go
@@ -22,6 +22,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
)
// cursorEvalTracer is responsible for maintaining a TxnPath that points to the currently executing
@@ -47,7 +48,7 @@ func (tracer *cursorEvalTracer) AfterTxn(ep *logic.EvalParams, groupIndex int, a
tracer.previousInnerTxns = tracer.previousInnerTxns[:len(tracer.previousInnerTxns)-1]
}
-func (tracer *cursorEvalTracer) AfterTxnGroup(ep *logic.EvalParams, evalError error) {
+func (tracer *cursorEvalTracer) AfterTxnGroup(ep *logic.EvalParams, deltas *ledgercore.StateDelta, evalError error) {
top := len(tracer.relativeCursor) - 1
if len(tracer.previousInnerTxns) != 0 {
tracer.previousInnerTxns[len(tracer.previousInnerTxns)-1] += tracer.relativeCursor[top] + 1
@@ -83,8 +84,8 @@ type evalTracer struct {
failedAt TxnPath
}
-func makeEvalTracer(lastRound basics.Round, txgroup []transactions.SignedTxn) *evalTracer {
- result := makeSimulationResult(lastRound, [][]transactions.SignedTxn{txgroup})
+func makeEvalTracer(lastRound basics.Round, request Request) *evalTracer {
+ result := makeSimulationResult(lastRound, request)
return &evalTracer{result: &result}
}
@@ -130,13 +131,28 @@ func (tracer *evalTracer) BeforeTxnGroup(ep *logic.EvalParams) {
if ep.GetCaller() != nil {
// If this is an inner txn group, save the txns
tracer.populateInnerTransactions(ep.TxnGroup)
+ tracer.result.TxnGroups[0].AppBudgetAdded += uint64(ep.Proto.MaxAppProgramCost)
}
tracer.cursorEvalTracer.BeforeTxnGroup(ep)
+
+ // Currently only the first txn group is supported
+ if ep.PooledApplicationBudget != nil && tracer.result.TxnGroups[0].AppBudgetAdded == 0 {
+ tracer.result.TxnGroups[0].AppBudgetAdded = uint64(*ep.PooledApplicationBudget)
+ }
+
+ // Add any extra opcode budget specified in the request to the group budget
+ if ep.PooledApplicationBudget != nil {
+ tracer.result.TxnGroups[0].AppBudgetAdded += tracer.result.EvalOverrides.ExtraOpcodeBudget
+ *ep.PooledApplicationBudget += int(tracer.result.EvalOverrides.ExtraOpcodeBudget)
+ }
+
+ // Override runtime related constraints against ep, before entering txn group
+ ep.EvalConstants = tracer.result.EvalOverrides.LogicEvalConstants()
}
-func (tracer *evalTracer) AfterTxnGroup(ep *logic.EvalParams, evalError error) {
+func (tracer *evalTracer) AfterTxnGroup(ep *logic.EvalParams, deltas *ledgercore.StateDelta, evalError error) {
tracer.handleError(evalError)
- tracer.cursorEvalTracer.AfterTxnGroup(ep, evalError)
+ tracer.cursorEvalTracer.AfterTxnGroup(ep, deltas, evalError)
}
func (tracer *evalTracer) saveApplyData(applyData transactions.ApplyData) {
@@ -186,8 +202,14 @@ func (tracer *evalTracer) AfterOpcode(cx *logic.EvalContext, evalError error) {
func (tracer *evalTracer) AfterProgram(cx *logic.EvalContext, evalError error) {
if cx.RunMode() != logic.ModeApp {
- // do nothing for LogicSig programs
+ // Report cost for LogicSig program and exit
+ tracer.result.TxnGroups[0].Txns[cx.GroupIndex()].LogicSigBudgetConsumed = uint64(cx.Cost())
return
}
+
+ // Report cost of this program.
+ // If it is an inner app call, roll up its cost to the top level transaction.
+ tracer.result.TxnGroups[0].Txns[tracer.relativeCursor[0]].AppBudgetConsumed += uint64(cx.Cost())
+
tracer.handleError(evalError)
}
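
AfterProgram above charges every app program's cost to the top-level transaction at relativeCursor[0], so inner app calls roll up instead of appearing as separate entries. A toy illustration of that accounting, with hypothetical costs:

    consumed := make([]uint64, 2) // one counter per top-level txn
    topLevel := 1                 // index of the app call txn in the group
    for _, cost := range []uint64{7, 12} { // outer program, then its inner call
        consumed[topLevel] += cost // inner cost rolls up to the top-level txn
    }
    // consumed[1] == 19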
diff --git a/ledger/simulation/tracer_test.go b/ledger/simulation/tracer_test.go
index b4429be41..190739e73 100644
--- a/ledger/simulation/tracer_test.go
+++ b/ledger/simulation/tracer_test.go
@@ -17,7 +17,6 @@
package simulation
import (
- "fmt"
"testing"
"github.com/algorand/go-algorand/data/transactions"
@@ -219,7 +218,7 @@ func TestCursorEvalTracer(t *testing.T) {
for _, tc := range testCases {
tc := tc
- t.Run(fmt.Sprintf("%s", tc.name), func(t *testing.T) {
+ t.Run(tc.name, func(t *testing.T) {
t.Parallel()
var tracer cursorEvalTracer
@@ -236,7 +235,7 @@ func TestCursorEvalTracer(t *testing.T) {
case mocktracer.BeforeTxnGroupEvent:
tracer.BeforeTxnGroup(&ep)
case mocktracer.AfterTxnGroupEvent:
- tracer.AfterTxnGroup(&ep, nil)
+ tracer.AfterTxnGroup(&ep, nil, nil)
default:
t.Fatalf("unexpected timeline hook: %v", step.action)
}
diff --git a/ledger/spverificationtracker.go b/ledger/spverificationtracker.go
new file mode 100644
index 000000000..d98974897
--- /dev/null
+++ b/ledger/spverificationtracker.go
@@ -0,0 +1,313 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/ledger/store/trackerdb"
+ "github.com/algorand/go-algorand/logging"
+)
+
+var (
+ errSPVerificationContextNotFound = errors.New("requested state proof verification context not found")
+)
+
+type verificationDeleteContext struct {
+ confirmedRound basics.Round
+ stateProofNextRound basics.Round
+}
+
+type verificationCommitContext struct {
+ confirmedRound basics.Round
+ verificationContext ledgercore.StateProofVerificationContext
+}
+
+// spVerificationTracker is in charge of tracking context required to verify state proofs until such a time
+// as the context is no longer needed.
+type spVerificationTracker struct {
+ // pendingCommitContexts represents the part of the tracked verification context currently in memory. Each element in this
+ // array contains both the context required to verify a single state proof and context to decide whether it's possible to
+ // commit the verification context to the database.
+ pendingCommitContexts []verificationCommitContext
+
+ // pendingDeleteContexts represents the context required to delete committed state proof verification context from the
+ // database.
+ pendingDeleteContexts []verificationDeleteContext
+
+ // mu protects pendingCommitContexts and pendingDeleteContexts.
+ mu deadlock.RWMutex
+
+ // log copied from ledger
+ log logging.Logger
+
+ l ledgerForTracker
+
+ // lastLookedUpVerificationContext caches the most recently looked-up verification context.
+ lastLookedUpVerificationContext ledgercore.StateProofVerificationContext
+}
+
+func (spt *spVerificationTracker) loadFromDisk(l ledgerForTracker, _ basics.Round) error {
+ spt.log = l.trackerLog()
+ spt.l = l
+
+ spt.mu.Lock()
+ defer spt.mu.Unlock()
+
+ // reset the cache
+ spt.lastLookedUpVerificationContext = ledgercore.StateProofVerificationContext{}
+
+ const initialContextArraySize = 10
+ spt.pendingCommitContexts = make([]verificationCommitContext, 0, initialContextArraySize)
+ spt.pendingDeleteContexts = make([]verificationDeleteContext, 0, initialContextArraySize)
+
+ return nil
+}
+
+func (spt *spVerificationTracker) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) {
+ currentStateProofInterval := basics.Round(blk.ConsensusProtocol().StateProofInterval)
+
+ if currentStateProofInterval == 0 {
+ return
+ }
+
+ if blk.Round()%currentStateProofInterval == 0 {
+ spt.appendCommitContext(&blk)
+ }
+
+ if delta.StateProofNext != 0 {
+ spt.appendDeleteContext(&blk, &delta)
+ }
+}
+
+func (spt *spVerificationTracker) committedUpTo(round basics.Round) (minRound, lookback basics.Round) {
+ return round, 0
+}
+
+func (spt *spVerificationTracker) produceCommittingTask(_ basics.Round, _ basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
+ return dcr
+}
+
+func (spt *spVerificationTracker) prepareCommit(dcc *deferredCommitContext) error {
+ spt.mu.RLock()
+ defer spt.mu.RUnlock()
+
+ lastContextToCommitIndex := spt.roundToLatestCommitContextIndex(dcc.newBase())
+ dcc.spVerification.commitContext = make([]verificationCommitContext, lastContextToCommitIndex+1)
+ copy(dcc.spVerification.commitContext, spt.pendingCommitContexts[:lastContextToCommitIndex+1])
+
+ dcc.spVerification.lastDeleteIndex = spt.roundToLatestDeleteContextIndex(dcc.newBase())
+ if dcc.spVerification.lastDeleteIndex >= 0 {
+ dcc.spVerification.earliestLastAttestedRound = spt.pendingDeleteContexts[dcc.spVerification.lastDeleteIndex].stateProofNextRound
+ }
+
+ return nil
+}
+
+func (spt *spVerificationTracker) commitRound(ctx context.Context, tx trackerdb.TransactionScope, dcc *deferredCommitContext) (err error) {
+ if len(dcc.spVerification.commitContext) != 0 {
+ err = commitSPContexts(ctx, tx, dcc.spVerification.commitContext)
+ if err != nil {
+ return err
+ }
+ }
+
+ if dcc.spVerification.lastDeleteIndex >= 0 {
+ err = tx.MakeSpVerificationCtxReaderWriter().DeleteOldSPContexts(ctx, dcc.spVerification.earliestLastAttestedRound)
+ }
+
+ return err
+}
+
+func commitSPContexts(ctx context.Context, tx trackerdb.TransactionScope, commitData []verificationCommitContext) error {
+ ptrToCtxs := make([]*ledgercore.StateProofVerificationContext, len(commitData))
+ for i := 0; i < len(commitData); i++ {
+ ptrToCtxs[i] = &commitData[i].verificationContext
+ }
+
+ return tx.MakeSpVerificationCtxReaderWriter().StoreSPContexts(ctx, ptrToCtxs)
+}
+
+func (spt *spVerificationTracker) postCommit(_ context.Context, dcc *deferredCommitContext) {
+ spt.mu.Lock()
+ defer spt.mu.Unlock()
+
+ spt.pendingCommitContexts = spt.pendingCommitContexts[len(dcc.spVerification.commitContext):]
+ spt.pendingDeleteContexts = spt.pendingDeleteContexts[dcc.spVerification.lastDeleteIndex+1:]
+}
+
+func (spt *spVerificationTracker) postCommitUnlocked(context.Context, *deferredCommitContext) {
+}
+
+func (spt *spVerificationTracker) handleUnorderedCommitOrError(*deferredCommitContext) {
+}
+
+func (spt *spVerificationTracker) close() {
+}
+
+func (spt *spVerificationTracker) LookupVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ if lstlookup := spt.retrieveFromCache(stateProofLastAttestedRound); lstlookup != nil {
+ return lstlookup, nil
+ }
+
+ verificationContext, err := spt.lookupVerificationContext(stateProofLastAttestedRound)
+ if err != nil {
+ return nil, err
+ }
+
+ // before returning, update the cache
+ spt.mu.Lock()
+ spt.lastLookedUpVerificationContext = *verificationContext
+ spt.mu.Unlock()
+
+ return verificationContext, nil
+}
+
+func (spt *spVerificationTracker) retrieveFromCache(stateProofLastAttestedRound basics.Round) *ledgercore.StateProofVerificationContext {
+ spt.mu.RLock()
+ defer spt.mu.RUnlock()
+
+ if spt.lastLookedUpVerificationContext.LastAttestedRound == stateProofLastAttestedRound &&
+ !spt.lastLookedUpVerificationContext.MsgIsZero() {
+ cpy := spt.lastLookedUpVerificationContext
+
+ return &cpy
+ }
+
+ return nil
+}
+
+func (spt *spVerificationTracker) lookupVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ spt.mu.RLock()
+ defer spt.mu.RUnlock()
+
+ if len(spt.pendingCommitContexts) > 0 &&
+ stateProofLastAttestedRound >= spt.pendingCommitContexts[0].verificationContext.LastAttestedRound &&
+ stateProofLastAttestedRound <= spt.pendingCommitContexts[len(spt.pendingCommitContexts)-1].verificationContext.LastAttestedRound {
+ return spt.lookupContextInTrackedMemory(stateProofLastAttestedRound)
+ }
+
+ if len(spt.pendingCommitContexts) == 0 || stateProofLastAttestedRound < spt.pendingCommitContexts[0].verificationContext.LastAttestedRound {
+ return spt.lookupContextInDB(stateProofLastAttestedRound)
+ }
+
+ return &ledgercore.StateProofVerificationContext{}, fmt.Errorf("requested context for round %d, greater than maximum context round %d: %w",
+ stateProofLastAttestedRound,
+ spt.pendingCommitContexts[len(spt.pendingCommitContexts)-1].verificationContext.LastAttestedRound,
+ errSPVerificationContextNotFound)
+}
+
+func (spt *spVerificationTracker) lookupContextInTrackedMemory(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ for _, commitContext := range spt.pendingCommitContexts {
+ if commitContext.verificationContext.LastAttestedRound == stateProofLastAttestedRound {
+ verificationContextCopy := commitContext.verificationContext
+ return &verificationContextCopy, nil
+ }
+ }
+
+ return &ledgercore.StateProofVerificationContext{}, fmt.Errorf("%w for round %d: memory lookup failed",
+ errSPVerificationContextNotFound, stateProofLastAttestedRound)
+}
+
+func (spt *spVerificationTracker) lookupContextInDB(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ var spContext *ledgercore.StateProofVerificationContext
+ err := spt.l.trackerDB().Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) (err error) {
+ spContext, err = tx.MakeSpVerificationCtxReader().LookupSPContext(stateProofLastAttestedRound)
+ if err != nil {
+ err = fmt.Errorf("%w for round %d: %s", errSPVerificationContextNotFound, stateProofLastAttestedRound, err)
+ }
+
+ return err
+ })
+
+ return spContext, err
+}
+
+func (spt *spVerificationTracker) roundToLatestCommitContextIndex(committedRound basics.Round) int {
+ latestCommittedContextIndex := -1
+
+ for index, ctx := range spt.pendingCommitContexts {
+ if ctx.confirmedRound > committedRound {
+ break
+ }
+
+ latestCommittedContextIndex = index
+ }
+
+ return latestCommittedContextIndex
+}
+
+func (spt *spVerificationTracker) roundToLatestDeleteContextIndex(committedRound basics.Round) int {
+ latestCommittedContextIndex := -1
+
+ for index, ctx := range spt.pendingDeleteContexts {
+ if ctx.confirmedRound > committedRound {
+ break
+ }
+
+ latestCommittedContextIndex = index
+ }
+
+ return latestCommittedContextIndex
+}
+
+func (spt *spVerificationTracker) appendCommitContext(blk *bookkeeping.Block) {
+ spt.mu.Lock()
+ defer spt.mu.Unlock()
+
+ if len(spt.pendingCommitContexts) > 0 {
+ lastCommitConfirmedRound := spt.pendingCommitContexts[len(spt.pendingCommitContexts)-1].confirmedRound
+ if blk.Round() <= lastCommitConfirmedRound {
+ spt.log.Panicf("state proof verification: attempted to append commit context confirmed earlier than latest"+
+ "commit context, round: %d, last confirmed commit context round: %d", blk.Round(), lastCommitConfirmedRound)
+ }
+ }
+ latestRound := blk.Round() + basics.Round(blk.ConsensusProtocol().StateProofInterval)
+ commitContext := verificationCommitContext{
+ confirmedRound: blk.Round(),
+ verificationContext: *ledgercore.MakeStateProofVerificationContext(&blk.BlockHeader, latestRound),
+ }
+
+ spt.pendingCommitContexts = append(spt.pendingCommitContexts, commitContext)
+}
+
+func (spt *spVerificationTracker) appendDeleteContext(blk *bookkeeping.Block, delta *ledgercore.StateDelta) {
+ spt.mu.Lock()
+ defer spt.mu.Unlock()
+
+ if len(spt.pendingDeleteContexts) > 0 {
+ lastDeleteConfirmedRound := spt.pendingDeleteContexts[len(spt.pendingDeleteContexts)-1].confirmedRound
+ if blk.Round() <= lastDeleteConfirmedRound {
+ spt.log.Panicf("state proof verification: attempted to append delete context confirmed earlier than latest"+
+ "delete context, round: %d, last confirmed delete context round: %d", blk.Round(), lastDeleteConfirmedRound)
+ }
+ }
+
+ deletionContext := verificationDeleteContext{
+ confirmedRound: blk.Round(),
+ stateProofNextRound: delta.StateProofNext,
+ }
+
+ spt.pendingDeleteContexts = append(spt.pendingDeleteContexts, deletionContext)
+}
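
Lookups above fall through from the single-entry cache to the in-memory pending window and finally to the database, erroring out when the requested round exceeds the newest tracked context. The round arithmetic is worth making concrete: with StateProofInterval = 256, appendCommitContext fires at rounds 256, 512, ..., and each stored context covers LastAttestedRound = blk.Round() + 256. A worked sketch under those assumptions:

    package main

    import "fmt"

    func main() {
        const interval = 256
        for _, confirmed := range []uint64{256, 512, 768} {
            // per appendCommitContext: LastAttestedRound = confirmedRound + interval
            fmt.Printf("context confirmed at %d verifies the proof attested at %d\n",
                confirmed, confirmed+interval)
        }
    }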
diff --git a/ledger/spverificationtracker_test.go b/ledger/spverificationtracker_test.go
new file mode 100644
index 000000000..e0f073fe4
--- /dev/null
+++ b/ledger/spverificationtracker_test.go
@@ -0,0 +1,492 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "testing"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/ledger/store/trackerdb"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+const defaultStateProofInterval = uint64(256)
+const defaultFirstStateProofContextRound = basics.Round(defaultStateProofInterval * 2)
+const defaultFirstStateProofContextInterval = basics.Round(2)
+const unusedByStateProofTracker = basics.Round(0)
+
+type StateProofTrackingLocation uint64
+
+const (
+ any StateProofTrackingLocation = iota
+ trackerDB
+ trackerMemory
+)
+
+func initializeLedgerSpt(t *testing.T) (*mockLedgerForTracker, *spVerificationTracker) {
+ a := require.New(t)
+ accts := []map[basics.Address]basics.AccountData{makeRandomOnlineAccounts(20)}
+
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, accts)
+
+ spt := spVerificationTracker{}
+
+ conf := config.GetDefaultLocal()
+
+ _, err := trackerDBInitialize(ml, false, ".")
+ a.NoError(err)
+
+ err = ml.trackers.initialize(ml, []ledgerTracker{&spt}, conf)
+ a.NoError(err)
+ err = spt.loadFromDisk(ml, unusedByStateProofTracker)
+ a.NoError(err)
+
+ return ml, &spt
+}
+
+func mockCommit(t *testing.T, spt *spVerificationTracker, ml *mockLedgerForTracker, dbRound basics.Round, newBase basics.Round) {
+ a := require.New(t)
+
+ offset := uint64(newBase - dbRound)
+
+ dcr := deferredCommitRange{offset: offset}
+
+ dcc := deferredCommitContext{
+ deferredCommitRange: dcr,
+ }
+
+ spt.committedUpTo(newBase)
+ spt.produceCommittingTask(newBase, dbRound, &dcr)
+ err := spt.prepareCommit(&dcc)
+ a.NoError(err)
+
+ err = ml.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
+ return spt.commitRound(ctx, tx, &dcc)
+ })
+ a.NoError(err)
+
+ postCommitCtx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ spt.postCommit(postCommitCtx, &dcc)
+ spt.postCommitUnlocked(postCommitCtx, &dcc)
+}
+
+func genesisBlock() *blockEntry {
+ initialRound := basics.Round(0)
+ block := randomBlock(initialRound)
+
+ var stateTracking bookkeeping.StateProofTrackingData
+ block.block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+
+ stateTracking.StateProofNextRound = basics.Round(defaultStateProofInterval * 2)
+ block.block.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateTracking
+
+ return &block
+}
+
+func blockStateProofsEnabled(prevBlock *blockEntry, stateProofInterval uint64, stuckStateProofs bool) blockEntry {
+ round := prevBlock.block.Round() + 1
+ prevBlockLastAttestedRound := prevBlock.block.StateProofTracking[protocol.StateProofBasic].StateProofNextRound
+
+ modifiedConsensus := config.Consensus[protocol.ConsensusCurrentVersion]
+ modifiedConsensus.StateProofInterval = stateProofInterval
+ config.Consensus[protocol.ConsensusCurrentVersion] = modifiedConsensus
+
+ block := randomBlock(round)
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+
+ var stateTracking bookkeeping.StateProofTrackingData
+ block.block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+
+ if !stuckStateProofs && round > prevBlockLastAttestedRound {
+ stateTracking.StateProofNextRound = prevBlockLastAttestedRound + basics.Round(block.block.ConsensusProtocol().StateProofInterval)
+ } else {
+ stateTracking.StateProofNextRound = prevBlockLastAttestedRound
+ }
+
+ block.block.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateTracking
+ return block
+}
+
+func feedBlocksUpToRound(spt *spVerificationTracker, prevBlock *blockEntry, targetRound basics.Round,
+ stateProofInterval uint64, stuckStateProofs bool) *blockEntry {
+ for i := prevBlock.block.Round(); i < targetRound; i++ {
+ block := blockStateProofsEnabled(prevBlock, stateProofInterval, stuckStateProofs)
+ stateProofDelta := basics.Round(0)
+
+ prevStateProofNextRound := prevBlock.block.StateProofTracking[protocol.StateProofBasic].StateProofNextRound
+ currentStateProofNextRound := block.block.StateProofTracking[protocol.StateProofBasic].StateProofNextRound
+
+ if currentStateProofNextRound != prevStateProofNextRound {
+ stateProofDelta = currentStateProofNextRound
+ }
+
+ spt.newBlock(block.block, ledgercore.StateDelta{StateProofNext: stateProofDelta})
+ prevBlock = &block
+ }
+
+ return prevBlock
+}
+
+func verifyStateProofVerificationTracking(t *testing.T, spt *spVerificationTracker,
+ startRound basics.Round, contextAmount uint64, stateProofInterval uint64, contextPresenceExpected bool, trackingLocation StateProofTrackingLocation) {
+ a := require.New(t)
+
+ finalLastAttestedRound := startRound + basics.Round((contextAmount-1)*stateProofInterval)
+
+ for lastAttestedRound := startRound; lastAttestedRound <= finalLastAttestedRound; lastAttestedRound += basics.Round(stateProofInterval) {
+ var err error
+ switch trackingLocation {
+ case any:
+ _, err = spt.LookupVerificationContext(lastAttestedRound)
+ case trackerDB:
+ _, err = spt.lookupContextInDB(lastAttestedRound)
+ case trackerMemory:
+ _, err = spt.lookupContextInTrackedMemory(lastAttestedRound)
+ }
+
+ if contextPresenceExpected {
+ a.NoError(err)
+ } else {
+ a.ErrorIs(err, errSPVerificationContextNotFound)
+ }
+ }
+}
+
+func TestStateProofVerificationTracker_StateProofsDisabled(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ml, spt := initializeLedgerSpt(t)
+ defer ml.Close()
+ defer spt.close()
+
+ roundsAmount := basics.Round(1000)
+ for round := basics.Round(1); round <= roundsAmount; round++ {
+ block := randomBlock(round)
+ // Last protocol version without state proofs.
+ block.block.CurrentProtocol = protocol.ConsensusV33
+ spt.newBlock(block.block, ledgercore.StateDelta{})
+ }
+
+ mockCommit(t, spt, ml, 0, roundsAmount)
+
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, uint64(roundsAmount)/defaultStateProofInterval, defaultStateProofInterval, false, any)
+}
+
+func TestStateProofVerificationTracker_StateProofsNotStuck(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ml, spt := initializeLedgerSpt(t)
+ defer ml.Close()
+ defer spt.close()
+
+ expectedContextNum := uint64(12)
+ lastBlock := feedBlocksUpToRound(spt, genesisBlock(),
+ basics.Round(expectedContextNum*defaultStateProofInterval+defaultStateProofInterval-1),
+ defaultStateProofInterval, false)
+
+ mockCommit(t, spt, ml, 0, lastBlock.block.Round())
+
+ expectedRemainingContextNum := expectedContextNum - 1
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, expectedRemainingContextNum, defaultStateProofInterval, false, any)
+
+ finalLastAttestedRound := defaultFirstStateProofContextRound + basics.Round(expectedRemainingContextNum*defaultStateProofInterval)
+ // The last verification context should still be tracked since the round with the state proof transaction it is used
+ // to verify has not yet been committed.
+ verifyStateProofVerificationTracking(t, spt, finalLastAttestedRound, 1, defaultStateProofInterval, true, any)
+}
+
+func TestStateProofVerificationTracker_CommitFullDbFlush(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ml, spt := initializeLedgerSpt(t)
+ defer ml.Close()
+ defer spt.close()
+
+ expectedContextNum := uint64(10)
+
+ lastBlock := feedBlocksUpToRound(spt, genesisBlock(), basics.Round(expectedContextNum*defaultStateProofInterval),
+ defaultStateProofInterval, true)
+
+ mockCommit(t, spt, ml, 0, lastBlock.block.Round())
+
+ spt.lastLookedUpVerificationContext = ledgercore.StateProofVerificationContext{}
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, expectedContextNum, defaultStateProofInterval, false, trackerMemory)
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, expectedContextNum, defaultStateProofInterval, true, trackerDB)
+}
+
+func TestStateProofVerificationTracker_CommitPartialDbFlush(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ml, spt := initializeLedgerSpt(t)
+ defer ml.Close()
+ defer spt.close()
+
+ contextToAdd := uint64(10)
+ _ = feedBlocksUpToRound(spt, genesisBlock(), basics.Round(contextToAdd*defaultStateProofInterval),
+ defaultStateProofInterval, true)
+
+ expectedContextInDbNum := uint64(2)
+ expectedContextInMemoryNum := contextToAdd - expectedContextInDbNum
+
+ mockCommit(t, spt, ml, 0, basics.Round(defaultStateProofInterval*expectedContextInDbNum))
+
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, expectedContextInDbNum, defaultStateProofInterval, true, trackerDB)
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, expectedContextInDbNum, defaultStateProofInterval, false, trackerMemory)
+
+ firstNonFlushedContextTargetRound := defaultFirstStateProofContextRound + basics.Round(expectedContextInDbNum*defaultStateProofInterval)
+ verifyStateProofVerificationTracking(t, spt, firstNonFlushedContextTargetRound, expectedContextInMemoryNum, defaultStateProofInterval, false, trackerDB)
+ verifyStateProofVerificationTracking(t, spt, firstNonFlushedContextTargetRound, expectedContextInMemoryNum, defaultStateProofInterval, true, trackerMemory)
+}
+
+func TestStateProofVerificationTracker_CommitNoDbFlush(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ml, spt := initializeLedgerSpt(t)
+ defer ml.Close()
+ defer spt.close()
+
+ contextToAdd := uint64(10)
+ _ = feedBlocksUpToRound(spt, genesisBlock(), basics.Round(contextToAdd*defaultStateProofInterval),
+ defaultStateProofInterval, true)
+
+ mockCommit(t, spt, ml, 0, basics.Round(defaultStateProofInterval-1))
+
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, contextToAdd, defaultStateProofInterval, true, trackerMemory)
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, contextToAdd, defaultStateProofInterval, false, trackerDB)
+}
+
+func TestStateProofVerificationTracker_CommitFullDbPruning(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ml, spt := initializeLedgerSpt(t)
+ defer ml.Close()
+ defer spt.close()
+
+ contextToAdd := uint64(6)
+ maxStateProofsToGenerate := contextToAdd - 1
+
+ lastStuckBlock := feedBlocksUpToRound(spt, genesisBlock(), basics.Round(contextToAdd*defaultStateProofInterval),
+ defaultStateProofInterval, true)
+ lastBlock := feedBlocksUpToRound(spt, lastStuckBlock, lastStuckBlock.block.Round()+basics.Round(maxStateProofsToGenerate),
+ defaultStateProofInterval, false)
+
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, contextToAdd, defaultStateProofInterval, true, trackerMemory)
+
+ mockCommit(t, spt, ml, 0, lastBlock.block.Round())
+
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, maxStateProofsToGenerate, defaultStateProofInterval, false, any)
+
+ finalLastAttestedRound := defaultFirstStateProofContextRound + basics.Round(maxStateProofsToGenerate*defaultStateProofInterval)
+ // The last verification context should still be tracked since the round with the state proof transaction it is used
+ // to verify has not yet been committed.
+ verifyStateProofVerificationTracking(t, spt, finalLastAttestedRound, 1, defaultStateProofInterval, true, any)
+}
+
+func TestStateProofVerificationTracker_CommitPartialDbPruning(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ml, spt := initializeLedgerSpt(t)
+ defer ml.Close()
+ defer spt.close()
+
+ contextToAdd := uint64(6)
+ maxStateProofsToGenerate := contextToAdd - 1
+ contextToRemove := maxStateProofsToGenerate - 1
+
+ lastStuckBlock := feedBlocksUpToRound(spt, genesisBlock(), basics.Round(contextToAdd*defaultStateProofInterval),
+ defaultStateProofInterval, true)
+ _ = feedBlocksUpToRound(spt, lastStuckBlock,
+ lastStuckBlock.block.Round()+basics.Round(maxStateProofsToGenerate*defaultStateProofInterval),
+ defaultStateProofInterval, false)
+
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, contextToAdd, defaultStateProofInterval, true, trackerMemory)
+
+ mockCommit(t, spt, ml, 0, lastStuckBlock.block.Round()+basics.Round(contextToRemove))
+
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, contextToRemove, defaultStateProofInterval, false, any)
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound+basics.Round(contextToRemove*defaultStateProofInterval),
+ contextToAdd-contextToRemove, defaultStateProofInterval, true, trackerDB)
+}
+
+func TestStateProofVerificationTracker_CommitNoDbPruning(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ ml, spt := initializeLedgerSpt(t)
+ defer ml.Close()
+ defer spt.close()
+
+ contextToAdd := uint64(6)
+ maxStateProofsToGenerate := contextToAdd - 1
+ offsetBeforeStateProofs := basics.Round(defaultStateProofInterval / 2)
+
+ lastStuckBlock := feedBlocksUpToRound(spt, genesisBlock(), basics.Round(contextToAdd*defaultStateProofInterval),
+ defaultStateProofInterval, true)
+
+ lastStuckBlockRound := lastStuckBlock.block.Round()
+ var block blockEntry
+ for round := lastStuckBlockRound + 1; round <= lastStuckBlockRound+offsetBeforeStateProofs; round++ {
+ block = randomBlock(round)
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ spt.newBlock(block.block, ledgercore.StateDelta{})
+ }
+
+ _ = feedBlocksUpToRound(spt, &block, block.block.Round()+basics.Round(maxStateProofsToGenerate), defaultStateProofInterval, false)
+
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, contextToAdd, defaultStateProofInterval, true, trackerMemory)
+
+ mockCommit(t, spt, ml, 0, lastStuckBlockRound)
+
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, contextToAdd, defaultStateProofInterval, true, trackerDB)
+ a.Equal(maxStateProofsToGenerate, uint64(len(spt.pendingDeleteContexts)))
+}
+
+func TestStateProofVerificationTracker_StateProofIntervalChange(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ml, spt := initializeLedgerSpt(t)
+ defer ml.Close()
+ defer spt.close()
+
+ newStateProofInterval := defaultStateProofInterval * 2
+
+ oldIntervalContext := uint64(5)
+ newIntervalContext := uint64(6)
+
+ lastOldIntervalBlock := feedBlocksUpToRound(spt, genesisBlock(), basics.Round(oldIntervalContext*defaultStateProofInterval),
+ defaultStateProofInterval, true)
+ lastStuckBlock := feedBlocksUpToRound(spt, lastOldIntervalBlock, lastOldIntervalBlock.block.Round()+basics.Round(newIntervalContext*newStateProofInterval),
+ newStateProofInterval, true)
+
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, oldIntervalContext, defaultStateProofInterval,
+ true, any)
+ firstNewIntervalLastAttestedRound := lastOldIntervalBlock.block.Round() + basics.Round(defaultStateProofInterval)
+ verifyStateProofVerificationTracking(t, spt, firstNewIntervalLastAttestedRound, newIntervalContext,
+ newStateProofInterval, true, any)
+
+ newIntervalRemovedStateProofs := newIntervalContext - (newIntervalContext / 2)
+ // State Proofs for old blocks should be generated using the old interval.
+ lastOldIntervalStateProofBlock := feedBlocksUpToRound(spt, lastStuckBlock,
+ lastStuckBlock.block.Round()+basics.Round(oldIntervalContext)-1,
+ defaultStateProofInterval, false)
+ lastBlock := feedBlocksUpToRound(spt, lastOldIntervalStateProofBlock,
+ lastOldIntervalStateProofBlock.block.Round()+basics.Round(newIntervalRemovedStateProofs),
+ newStateProofInterval, false)
+
+ mockCommit(t, spt, ml, 0, lastBlock.block.Round())
+
+ firstRemainingLastAttestedRound := firstNewIntervalLastAttestedRound +
+ basics.Round(newIntervalRemovedStateProofs*newStateProofInterval)
+ verifyStateProofVerificationTracking(t, spt, defaultFirstStateProofContextRound, oldIntervalContext, defaultStateProofInterval,
+ false, any)
+ verifyStateProofVerificationTracking(t, spt, firstNewIntervalLastAttestedRound,
+ newIntervalRemovedStateProofs, newStateProofInterval, false, any)
+ verifyStateProofVerificationTracking(t, spt, firstRemainingLastAttestedRound, newIntervalContext-newIntervalRemovedStateProofs,
+ newStateProofInterval, true, any)
+}
+
+func TestStateProofVerificationTracker_LookupVerificationContext(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ ml, spt := initializeLedgerSpt(t)
+ defer ml.Close()
+ defer spt.close()
+
+ contextToAdd := uint64(10)
+ _ = feedBlocksUpToRound(spt, genesisBlock(), basics.Round(contextToAdd*defaultStateProofInterval),
+ defaultStateProofInterval, true)
+
+ expectedContextInDbNum := uint64(2)
+
+ mockCommit(t, spt, ml, 0, basics.Round(defaultStateProofInterval*expectedContextInDbNum))
+
+ _, err := spt.LookupVerificationContext(basics.Round(0))
+ a.ErrorIs(err, errSPVerificationContextNotFound)
+ a.ErrorContains(err, "no rows")
+
+ finalLastAttestedRound := basics.Round(defaultStateProofInterval + contextToAdd*defaultStateProofInterval)
+ _, err = spt.LookupVerificationContext(finalLastAttestedRound + basics.Round(defaultStateProofInterval))
+ a.ErrorIs(err, errSPVerificationContextNotFound)
+ a.ErrorContains(err, "greater than maximum")
+
+ dbContextRound := basics.Round(defaultStateProofInterval + expectedContextInDbNum*defaultStateProofInterval)
+ dbContext, err := spt.LookupVerificationContext(dbContextRound)
+ a.NoError(err)
+ a.Equal(dbContextRound, dbContext.LastAttestedRound)
+
+ memoryContextRound := basics.Round(defaultStateProofInterval + (expectedContextInDbNum+1)*defaultStateProofInterval)
+
+ memoryContext, err := spt.LookupVerificationContext(memoryContextRound)
+ a.NoError(err)
+ a.Equal(memoryContextRound, memoryContext.LastAttestedRound)
+
+ // This error shouldn't happen in normal flow - we force it to happen for the test.
+ spt.pendingCommitContexts[0].verificationContext.LastAttestedRound = 0
+ spt.lastLookedUpVerificationContext = ledgercore.StateProofVerificationContext{}
+ _, err = spt.LookupVerificationContext(memoryContextRound)
+ a.ErrorIs(err, errSPVerificationContextNotFound)
+ a.ErrorContains(err, "memory lookup failed")
+}
+
+func TestStateProofVerificationTracker_PanicInvalidBlockInsertion(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ ml, spt := initializeLedgerSpt(t)
+ defer ml.Close()
+ defer spt.close()
+
+ contextToAdd := uint64(1)
+ _ = feedBlocksUpToRound(spt, genesisBlock(), basics.Round(contextToAdd*defaultStateProofInterval),
+ defaultStateProofInterval, true)
+
+ pastBlock := randomBlock(0)
+ a.Panics(func() { spt.appendCommitContext(&pastBlock.block) })
+}
+
+func TestStateProofVerificationTracker_lastLookupContextUpdatedAfterLookup(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ mockLedger, spt := initializeLedgerSpt(t)
+ defer mockLedger.Close()
+ defer spt.close()
+
+ a.Empty(spt.lastLookedUpVerificationContext)
+
+ NumberOfVerificationContextToAdd := uint64(10)
+ _ = feedBlocksUpToRound(spt, genesisBlock(), basics.Round(NumberOfVerificationContextToAdd*defaultStateProofInterval),
+ defaultStateProofInterval, true)
+
+ a.Empty(spt.lastLookedUpVerificationContext)
+
+ expectedContextInDbNum := NumberOfVerificationContextToAdd
+ for i := uint64(defaultFirstStateProofContextInterval); i < expectedContextInDbNum; i++ {
+ vf, err := spt.LookupVerificationContext(basics.Round(defaultStateProofInterval * i))
+ a.NoError(err)
+
+ a.Equal(*vf, spt.lastLookedUpVerificationContext)
+ }
+}
diff --git a/ledger/store/merkle_committer.go b/ledger/store/merkle_committer.go
new file mode 100644
index 000000000..bc7502dac
--- /dev/null
+++ b/ledger/store/merkle_committer.go
@@ -0,0 +1,75 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package store
+
+import "database/sql"
+
+// MerkleCommitter allows storing and loading merkletrie pages from a sqlite database.
+//
+//msgp:ignore MerkleCommitter
+type MerkleCommitter struct {
+ tx *sql.Tx
+ deleteStmt *sql.Stmt
+ insertStmt *sql.Stmt
+ selectStmt *sql.Stmt
+}
+
+// MakeMerkleCommitter creates a MerkleCommitter object that implements the merkletrie.Committer interface allowing storing and loading
+// merkletrie pages from a sqlite database.
+func MakeMerkleCommitter(tx *sql.Tx, staging bool) (mc *MerkleCommitter, err error) {
+ mc = &MerkleCommitter{tx: tx}
+ accountHashesTable := "accounthashes"
+ if staging {
+ accountHashesTable = "catchpointaccounthashes"
+ }
+ mc.deleteStmt, err = tx.Prepare("DELETE FROM " + accountHashesTable + " WHERE id=?")
+ if err != nil {
+ return nil, err
+ }
+ mc.insertStmt, err = tx.Prepare("INSERT OR REPLACE INTO " + accountHashesTable + "(id, data) VALUES(?, ?)")
+ if err != nil {
+ return nil, err
+ }
+ mc.selectStmt, err = tx.Prepare("SELECT data FROM " + accountHashesTable + " WHERE id = ?")
+ if err != nil {
+ return nil, err
+ }
+ return mc, nil
+}
+
+// StorePage is the merkletrie.Committer interface implementation; it stores a single page in a sqlite database table.
+func (mc *MerkleCommitter) StorePage(page uint64, content []byte) error {
+ if len(content) == 0 {
+ _, err := mc.deleteStmt.Exec(page)
+ return err
+ }
+ _, err := mc.insertStmt.Exec(page, content)
+ return err
+}
+
+// LoadPage is the merkletrie.Committer interface implementation; it loads a single page from a sqlite database table.
+func (mc *MerkleCommitter) LoadPage(page uint64) (content []byte, err error) {
+ err = mc.selectStmt.QueryRow(page).Scan(&content)
+ if err == sql.ErrNoRows {
+ content = nil
+ err = nil
+ return
+ } else if err != nil {
+ return nil, err
+ }
+ return content, nil
+}
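
MakeMerkleCommitter prepares all three statements up front so StorePage and LoadPage stay cheap inside a transaction, and an empty page is expressed as a delete rather than a zero-length blob. A usage sketch, assuming tx is an open *sql.Tx on a database that already has the accounthashes table:

    mc, err := store.MakeMerkleCommitter(tx, false) // false: live table, not the catchpoint staging one
    if err != nil {
        return err
    }
    if err := mc.StorePage(1, []byte{0x01, 0x02}); err != nil {
        return err
    }
    content, err := mc.LoadPage(1) // a missing page yields (nil, nil), not an error
    if err != nil {
        return err
    }
    _ = content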
diff --git a/ledger/store/trackerdb/catchpoint.go b/ledger/store/trackerdb/catchpoint.go
index b371e0d34..dfbacbd86 100644
--- a/ledger/store/trackerdb/catchpoint.go
+++ b/ledger/store/trackerdb/catchpoint.go
@@ -80,6 +80,8 @@ const (
CatchpointStateCatchupHashRound = CatchpointState("catchpointCatchupHashRound")
// CatchpointStateCatchpointLookback is the number of rounds we keep catchpoints for
CatchpointStateCatchpointLookback = CatchpointState("catchpointLookback")
+ // CatchpointStateCatchupVersion is the catchpoint version which the current catchpoint catchup process is trying to catch up to.
+ CatchpointStateCatchupVersion = CatchpointState("catchpointCatchupVersion")
)
// UnfinishedCatchpointRecord represents a stored record of an unfinished catchpoint.
@@ -128,6 +130,9 @@ type CatchpointFirstStageInfo struct {
TotalChunks uint64 `codec:"chunksCount"`
// BiggestChunkLen is the size in the bytes of the largest chunk, used when re-packing.
BiggestChunkLen uint64 `codec:"biggestChunk"`
+
+ // StateProofVerificationHash is the hash of the state proof verification data contained in the catchpoint data file.
+ StateProofVerificationHash crypto.Digest `codec:"spVerificationHash"`
}
// MakeCatchpointFilePath builds the path of a catchpoint file.
diff --git a/ledger/store/trackerdb/interface.go b/ledger/store/trackerdb/interface.go
index b5c93cb01..9e9fbb1a1 100644
--- a/ledger/store/trackerdb/interface.go
+++ b/ledger/store/trackerdb/interface.go
@@ -113,6 +113,7 @@ type AccountsReaderExt interface {
LookupOnlineAccountDataByAddress(addr basics.Address) (ref OnlineAccountRef, data []byte, err error)
AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error)
AccountsOnlineRoundParams() (onlineRoundParamsData []ledgercore.OnlineRoundParamsData, endRound basics.Round, err error)
+ ExpiredOnlineAccountsForRound(rnd, voteRnd basics.Round, proto config.ConsensusParams, rewardsLevel uint64) (map[basics.Address]*ledgercore.OnlineAccountData, error)
OnlineAccountsAll(maxAccounts uint64) ([]PersistedOnlineAccountData, error)
LoadTxTail(ctx context.Context, dbRound basics.Round) (roundData []*TxTailRound, roundHash []crypto.Digest, baseRound basics.Round, err error)
LoadAllFullAccounts(ctx context.Context, balancesTable string, resourcesTable string, acctCb func(basics.Address, basics.AccountData)) (count int, err error)
@@ -227,3 +228,26 @@ type CatchpointPendingHashesIter interface {
Next(ctx context.Context) (hashes [][]byte, err error)
Close()
}
+
+// SpVerificationCtxReader is a reader abstraction for stateproof verification tracker
+// Use with SnapshotScope
+type SpVerificationCtxReader interface {
+ LookupSPContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error)
+ GetAllSPContexts(ctx context.Context) ([]ledgercore.StateProofVerificationContext, error)
+ GetAllSPContextsFromCatchpointTbl(ctx context.Context) ([]ledgercore.StateProofVerificationContext, error)
+}
+
+// SpVerificationCtxWriter is a writer abstraction for stateproof verification tracker
+// Use with BatchScope
+type SpVerificationCtxWriter interface {
+ DeleteOldSPContexts(ctx context.Context, earliestLastAttestedRound basics.Round) error
+ StoreSPContexts(ctx context.Context, verificationContext []*ledgercore.StateProofVerificationContext) error
+ StoreSPContextsToCatchpointTbl(ctx context.Context, verificationContexts []ledgercore.StateProofVerificationContext) error
+}
+
+// SpVerificationCtxReaderWriter combines SpVerificationCtxReader and SpVerificationCtxWriter.
+// Use with TransactionScope.
+type SpVerificationCtxReaderWriter interface {
+ SpVerificationCtxReader
+ SpVerificationCtxWriter
+}
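
These three interfaces compose by plain Go interface embedding: SpVerificationCtxReaderWriter is satisfied by any type that implements both halves. A minimal, self-contained sketch of the same composition pattern, using illustrative names rather than the real trackerdb types:

    package main

    import "fmt"

    type ctxReader interface {
        Lookup(round uint64) (string, error)
    }

    type ctxWriter interface {
        Store(round uint64, verificationCtx string) error
    }

    // ctxReaderWriter mirrors SpVerificationCtxReaderWriter: the union of both interfaces.
    type ctxReaderWriter interface {
        ctxReader
        ctxWriter
    }

    // memStore satisfies both small interfaces, hence the composite too.
    type memStore map[uint64]string

    func (m memStore) Lookup(round uint64) (string, error) {
        v, ok := m[round]
        if !ok {
            return "", fmt.Errorf("no context for round %d", round)
        }
        return v, nil
    }

    func (m memStore) Store(round uint64, verificationCtx string) error {
        m[round] = verificationCtx
        return nil
    }

    func main() {
        var rw ctxReaderWriter = memStore{}
        if err := rw.Store(512, "verification context"); err != nil {
            panic(err)
        }
        v, err := rw.Lookup(512)
        if err != nil {
            panic(err)
        }
        fmt.Println(v)
    }
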
diff --git a/ledger/store/trackerdb/msgp_gen.go b/ledger/store/trackerdb/msgp_gen.go
index ad4df82ef..e6fa865d6 100644
--- a/ledger/store/trackerdb/msgp_gen.go
+++ b/ledger/store/trackerdb/msgp_gen.go
@@ -1134,8 +1134,8 @@ func (z *BaseVotingData) MsgIsZero() bool {
func (z *CatchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0001Len := uint32(6)
- var zb0001Mask uint8 /* 7 bits */
+ zb0001Len := uint32(7)
+ var zb0001Mask uint8 /* 8 bits */
if (*z).Totals.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x2
@@ -1156,10 +1156,14 @@ func (z *CatchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) {
zb0001Len--
zb0001Mask |= 0x20
}
- if (*z).TrieBalancesHash.MsgIsZero() {
+ if (*z).StateProofVerificationHash.MsgIsZero() {
zb0001Len--
zb0001Mask |= 0x40
}
+ if (*z).TrieBalancesHash.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x80
+ }
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
if zb0001Len != 0 {
@@ -1189,6 +1193,11 @@ func (z *CatchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendUint64(o, (*z).TotalKVs)
}
if (zb0001Mask & 0x40) == 0 { // if not empty
+ // string "spVerificationHash"
+ o = append(o, 0xb2, 0x73, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68)
+ o = (*z).StateProofVerificationHash.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x80) == 0 { // if not empty
// string "trieBalancesHash"
o = append(o, 0xb0, 0x74, 0x72, 0x69, 0x65, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x48, 0x61, 0x73, 0x68)
o = (*z).TrieBalancesHash.MarshalMsg(o)
@@ -1264,6 +1273,14 @@ func (z *CatchpointFirstStageInfo) UnmarshalMsg(bts []byte) (o []byte, err error
}
}
if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).StateProofVerificationHash.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "StateProofVerificationHash")
+ return
+ }
+ }
+ if zb0001 > 0 {
err = msgp.ErrTooManyArrayFields(zb0001)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
@@ -1322,6 +1339,12 @@ func (z *CatchpointFirstStageInfo) UnmarshalMsg(bts []byte) (o []byte, err error
err = msgp.WrapError(err, "BiggestChunkLen")
return
}
+ case "spVerificationHash":
+ bts, err = (*z).StateProofVerificationHash.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "StateProofVerificationHash")
+ return
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -1342,13 +1365,13 @@ func (_ *CatchpointFirstStageInfo) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *CatchpointFirstStageInfo) Msgsize() (s int) {
- s = 1 + 14 + (*z).Totals.Msgsize() + 17 + (*z).TrieBalancesHash.Msgsize() + 14 + msgp.Uint64Size + 9 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size
+ s = 1 + 14 + (*z).Totals.Msgsize() + 17 + (*z).TrieBalancesHash.Msgsize() + 14 + msgp.Uint64Size + 9 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 19 + (*z).StateProofVerificationHash.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *CatchpointFirstStageInfo) MsgIsZero() bool {
- return ((*z).Totals.MsgIsZero()) && ((*z).TrieBalancesHash.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalKVs == 0) && ((*z).TotalChunks == 0) && ((*z).BiggestChunkLen == 0)
+ return ((*z).Totals.MsgIsZero()) && ((*z).TrieBalancesHash.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalKVs == 0) && ((*z).TotalChunks == 0) && ((*z).BiggestChunkLen == 0) && ((*z).StateProofVerificationHash.MsgIsZero())
}
// MarshalMsg implements msgp.Marshaler
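
For orientation in the generated code above: msgp's omitempty encoding counts the non-zero fields, records the zero-valued ones in a bitmask, and then emits only the fields whose bit is clear. Adding StateProofVerificationHash as a seventh field is why the mask widens from 7 to 8 bits and TrieBalancesHash's bit moves from 0x40 to 0x80. A rough hand-written illustration of the technique (not the generated code itself):

    package main

    import "fmt"

    type record struct {
        A uint64
        B uint64
        C uint64
    }

    // encodeFieldNames mimics the omitempty pass: zero fields set a mask bit
    // and shrink the map header; only clear-bit fields get emitted.
    func encodeFieldNames(r record) []string {
        fieldLen := 3
        var mask uint8
        if r.A == 0 {
            fieldLen--
            mask |= 0x1
        }
        if r.B == 0 {
            fieldLen--
            mask |= 0x2
        }
        if r.C == 0 {
            fieldLen--
            mask |= 0x4
        }
        out := make([]string, 0, fieldLen)
        if mask&0x1 == 0 { // A is non-zero
            out = append(out, "a")
        }
        if mask&0x2 == 0 {
            out = append(out, "b")
        }
        if mask&0x4 == 0 {
            out = append(out, "c")
        }
        return out
    }

    func main() {
        fmt.Println(encodeFieldNames(record{A: 7, C: 9})) // [a c]
    }
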
diff --git a/ledger/store/trackerdb/sqlitedriver/accountsV2.go b/ledger/store/trackerdb/sqlitedriver/accountsV2.go
index 78083c95a..d6e733090 100644
--- a/ledger/store/trackerdb/sqlitedriver/accountsV2.go
+++ b/ledger/store/trackerdb/sqlitedriver/accountsV2.go
@@ -297,6 +297,50 @@ func (r *accountsV2Reader) OnlineAccountsAll(maxAccounts uint64) ([]trackerdb.Pe
return result, nil
}
+// ExpiredOnlineAccountsForRound returns all online accounts known at `rnd` that will be expired by `voteRnd`.
+func (r *accountsV2Reader) ExpiredOnlineAccountsForRound(rnd, voteRnd basics.Round, proto config.ConsensusParams, rewardsLevel uint64) (map[basics.Address]*ledgercore.OnlineAccountData, error) {
+ // This relies on SQLite's handling of max(updround) and bare columns not in the GROUP BY.
+ // The values of votelastvalid, votefirstvalid, and data will all be from the same row as max(updround)
+ rows, err := r.q.Query(`SELECT address, data, max(updround)
+FROM onlineaccounts
+WHERE updround <= ?
+GROUP BY address
+HAVING votelastvalid < ? and votelastvalid > 0
+ORDER BY address`, rnd, voteRnd)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ ret := make(map[basics.Address]*ledgercore.OnlineAccountData)
+ for rows.Next() {
+ var addrbuf []byte
+ var buf []byte
+ var addr basics.Address
+ var baseData trackerdb.BaseOnlineAccountData
+ var updround sql.NullInt64
+ err := rows.Scan(&addrbuf, &buf, &updround)
+ if err != nil {
+ return nil, err
+ }
+ if len(addrbuf) != len(addr) {
+ err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
+ return nil, err
+ }
+ copy(addr[:], addrbuf)
+ err = protocol.Decode(buf, &baseData)
+ if err != nil {
+ return nil, err
+ }
+ oadata := baseData.GetOnlineAccountData(proto, rewardsLevel)
+ if _, ok := ret[addr]; ok {
+ return nil, fmt.Errorf("duplicate address in expired online accounts: %s", addr.String())
+ }
+ ret[addr] = &oadata
+ }
+ return ret, nil
+}
+
// TotalResources returns the total number of resources
func (r *accountsV2Reader) TotalResources(ctx context.Context) (total uint64, err error) {
err = r.q.QueryRowContext(ctx, "SELECT count(1) FROM resources").Scan(&total)
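
The comment in ExpiredOnlineAccountsForRound leans on a documented SQLite guarantee: in an aggregate query that uses max(), bare columns in the result take their values from the row that supplied the maximum. A standalone demonstration of that behavior, assuming the mattn/go-sqlite3 driver:

    package main

    import (
        "database/sql"
        "fmt"

        _ "github.com/mattn/go-sqlite3"
    )

    func main() {
        db, err := sql.Open("sqlite3", ":memory:")
        if err != nil {
            panic(err)
        }
        defer db.Close()

        stmts := []string{
            `CREATE TABLE onlineaccounts (address TEXT, updround INT, votelastvalid INT)`,
            `INSERT INTO onlineaccounts VALUES ('addr1', 5, 100), ('addr1', 9, 150), ('addr2', 4, 90)`,
        }
        for _, s := range stmts {
            if _, err := db.Exec(s); err != nil {
                panic(err)
            }
        }

        // votelastvalid is a bare column: SQLite takes it from the row holding max(updround).
        rows, err := db.Query(`SELECT address, votelastvalid, max(updround)
    FROM onlineaccounts GROUP BY address ORDER BY address`)
        if err != nil {
            panic(err)
        }
        defer rows.Close()
        for rows.Next() {
            var addr string
            var vlv, upd int64
            if err := rows.Scan(&addr, &vlv, &upd); err != nil {
                panic(err)
            }
            fmt.Println(addr, vlv, upd) // addr1 150 9, then addr2 90 4
        }
    }
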
@@ -651,7 +695,12 @@ func (w *accountsV2Writer) TxtailNewRound(ctx context.Context, baseRound basics.
return err
}
-// OnlineAccountsDelete deleted entries with updRound <= expRound
+// OnlineAccountsDelete cleans up the onlineaccounts table by pruning expired entries.
+// It deletes entries with updRound < forgetBefore,
+// EXCEPT it will not delete the *latest* entry for an account, no matter how old,
+// so that accounts whose last update precedes forgetBefore still maintain an online account balance.
+// After this cleanup runs, an account in this table will have either one entry (if all entries besides the latest are expired)
+// or more than one entry (if multiple entries are not yet expired).
func (w *accountsV2Writer) OnlineAccountsDelete(forgetBefore basics.Round) (err error) {
rows, err := w.e.Query("SELECT rowid, address, updRound, data FROM onlineaccounts WHERE updRound < ? ORDER BY address, updRound DESC", forgetBefore)
if err != nil {
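
The retention rule documented on OnlineAccountsDelete can be stated in miniature: drop rows with updRound below the cutoff unless a row is the account's newest. An in-memory sketch of just that rule, with illustrative types rather than the SQL implementation:

    package main

    import "fmt"

    type row struct {
        addr     string
        updRound uint64
    }

    // prune keeps every row at or above the cutoff, plus the newest row per
    // account regardless of age, matching the comment above.
    func prune(rows []row, forgetBefore uint64) []row {
        latest := map[string]uint64{}
        for _, r := range rows {
            if r.updRound > latest[r.addr] {
                latest[r.addr] = r.updRound
            }
        }
        var kept []row
        for _, r := range rows {
            if r.updRound >= forgetBefore || r.updRound == latest[r.addr] {
                kept = append(kept, r)
            }
        }
        return kept
    }

    func main() {
        rows := []row{{"A", 3}, {"A", 8}, {"B", 2}}
        // B's only row is older than the cutoff, but it is B's latest, so it survives.
        fmt.Println(prune(rows, 5)) // [{A 8} {B 2}]
    }
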
diff --git a/ledger/store/trackerdb/sqlitedriver/catchpoint.go b/ledger/store/trackerdb/sqlitedriver/catchpoint.go
index 9bf15eb47..388749858 100644
--- a/ledger/store/trackerdb/sqlitedriver/catchpoint.go
+++ b/ledger/store/trackerdb/sqlitedriver/catchpoint.go
@@ -468,6 +468,7 @@ func (cw *catchpointWriter) ResetCatchpointStagingBalances(ctx context.Context,
"DROP TABLE IF EXISTS catchpointpendinghashes",
"DROP TABLE IF EXISTS catchpointresources",
"DROP TABLE IF EXISTS catchpointkvstore",
+ "DROP TABLE IF EXISTS catchpointstateproofverification",
"DELETE FROM accounttotals where id='catchpointStaging'",
}
@@ -489,6 +490,7 @@ func (cw *catchpointWriter) ResetCatchpointStagingBalances(ctx context.Context,
"CREATE TABLE IF NOT EXISTS catchpointaccounthashes (id integer primary key, data blob)",
"CREATE TABLE IF NOT EXISTS catchpointresources (addrid INTEGER NOT NULL, aidx INTEGER NOT NULL, data BLOB NOT NULL, PRIMARY KEY (addrid, aidx) ) WITHOUT ROWID",
"CREATE TABLE IF NOT EXISTS catchpointkvstore (key blob primary key, value blob)",
+ "CREATE TABLE IF NOT EXISTS catchpointstateproofverification (lastattestedround INTEGER PRIMARY KEY NOT NULL, verificationContext BLOB NOT NULL)",
createNormalizedOnlineBalanceIndex(idxnameBalances, "catchpointbalances"), // should this be removed ?
createUniqueAddressBalanceIndex(idxnameAddress, "catchpointbalances"),
@@ -514,12 +516,14 @@ func (cw *catchpointWriter) ApplyCatchpointStagingBalances(ctx context.Context,
"DROP TABLE IF EXISTS accounthashes",
"DROP TABLE IF EXISTS resources",
"DROP TABLE IF EXISTS kvstore",
+ "DROP TABLE IF EXISTS stateproofverification",
"ALTER TABLE catchpointbalances RENAME TO accountbase",
"ALTER TABLE catchpointassetcreators RENAME TO assetcreators",
"ALTER TABLE catchpointaccounthashes RENAME TO accounthashes",
"ALTER TABLE catchpointresources RENAME TO resources",
"ALTER TABLE catchpointkvstore RENAME TO kvstore",
+ "ALTER TABLE catchpointstateproofverification RENAME TO stateproofverification",
}
for _, stmt := range stmts {
diff --git a/ledger/store/trackerdb/sqlitedriver/schema.go b/ledger/store/trackerdb/sqlitedriver/schema.go
index 5ca781e00..47cb0180d 100644
--- a/ledger/store/trackerdb/sqlitedriver/schema.go
+++ b/ledger/store/trackerdb/sqlitedriver/schema.go
@@ -149,6 +149,15 @@ const createUnfinishedCatchpointsTable = `
round integer primary key NOT NULL,
blockhash blob NOT NULL)`
+const createStateProofVerificationTableQuery = `
+ CREATE TABLE IF NOT EXISTS stateproofverification (
+ lastattestedround integer primary key NOT NULL,
+ verificationcontext blob NOT NULL)`
+
+const createVoteLastValidIndex = `
+ CREATE INDEX IF NOT EXISTS onlineaccounts_votelastvalid_idx
+ ON onlineaccounts ( votelastvalid )`
+
var accountsResetExprs = []string{
`DROP TABLE IF EXISTS acctrounds`,
`DROP TABLE IF EXISTS accounttotals`,
@@ -164,6 +173,7 @@ var accountsResetExprs = []string{
`DROP TABLE IF EXISTS onlineroundparamstail`,
`DROP TABLE IF EXISTS catchpointfirststageinfo`,
`DROP TABLE IF EXISTS unfinishedcatchpoints`,
+ `DROP TABLE IF EXISTS stateproofverification`,
}
// accountsInit fills the database using tx with initAccounts if the
@@ -344,6 +354,12 @@ func accountsCreateBoxTable(ctx context.Context, tx *sql.Tx) error {
return nil
}
+// performKVStoreNullBlobConversion scans for keys with a NULL blob value and converts each value to `[]byte{}`.
+func performKVStoreNullBlobConversion(ctx context.Context, tx *sql.Tx) error {
+ _, err := tx.ExecContext(ctx, "UPDATE kvstore SET value = '' WHERE value is NULL")
+ return err
+}
+
func accountsCreateTxTailTable(ctx context.Context, tx *sql.Tx) (err error) {
for _, stmt := range createTxTailTable {
_, err = tx.ExecContext(ctx, stmt)
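
Why the NULL-to-empty conversion matters: scanning a NULL blob through database/sql yields a nil []byte, which downstream code can mistake for a missing value rather than an empty one. A standalone sketch, again assuming the mattn/go-sqlite3 driver:

    package main

    import (
        "database/sql"
        "fmt"

        _ "github.com/mattn/go-sqlite3"
    )

    func must(err error) {
        if err != nil {
            panic(err)
        }
    }

    func main() {
        db, err := sql.Open("sqlite3", ":memory:")
        must(err)
        defer db.Close()

        _, err = db.Exec(`CREATE TABLE kvstore (key BLOB PRIMARY KEY, value BLOB)`)
        must(err)
        _, err = db.Exec(`INSERT INTO kvstore VALUES ('k', NULL)`)
        must(err)

        // A NULL blob scans into a nil []byte.
        var v []byte
        must(db.QueryRow(`SELECT value FROM kvstore WHERE key='k'`).Scan(&v))
        fmt.Println("nil before conversion:", v == nil) // true

        // The migration's normalization pass.
        _, err = db.Exec(`UPDATE kvstore SET value = '' WHERE value IS NULL`)
        must(err)

        var nulls int
        must(db.QueryRow(`SELECT count(*) FROM kvstore WHERE value IS NULL`).Scan(&nulls))
        fmt.Println("NULL values after conversion:", nulls) // 0
    }
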
@@ -374,6 +390,11 @@ func accountsCreateUnfinishedCatchpointsTable(ctx context.Context, e db.Executab
return err
}
+func createStateProofVerificationTable(ctx context.Context, e db.Executable) error {
+ _, err := e.ExecContext(ctx, createStateProofVerificationTableQuery)
+ return err
+}
+
// performResourceTableMigration migrate the database to use the resources table.
func performResourceTableMigration(ctx context.Context, tx *sql.Tx, log func(processed, total uint64)) (err error) {
now := time.Now().UnixNano()
@@ -910,3 +931,9 @@ func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, e
updateStmt.Close()
return
}
+
+func convertOnlineRoundParamsTail(ctx context.Context, tx *sql.Tx) error {
+ // create the onlineaccounts votelastvalid index
+ _, err := tx.ExecContext(ctx, createVoteLastValidIndex)
+ return err
+}
diff --git a/ledger/store/trackerdb/sqlitedriver/spVerificationAccessor.go b/ledger/store/trackerdb/sqlitedriver/spVerificationAccessor.go
new file mode 100644
index 000000000..3a024e0a9
--- /dev/null
+++ b/ledger/store/trackerdb/sqlitedriver/spVerificationAccessor.go
@@ -0,0 +1,164 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package sqlitedriver
+
+import (
+ "context"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/ledger/store/trackerdb"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+type stateProofVerificationReader struct {
+ q db.Queryable
+}
+
+type stateProofVerificationWriter struct {
+ e db.Executable
+}
+
+type stateProofVerificationReaderWriter struct {
+ stateProofVerificationReader
+ stateProofVerificationWriter
+}
+
+func makeStateProofVerificationReader(q db.Queryable) *stateProofVerificationReader {
+ return &stateProofVerificationReader{q: q}
+}
+
+func makeStateProofVerificationWriter(e db.Executable) *stateProofVerificationWriter {
+ return &stateProofVerificationWriter{e: e}
+}
+
+func makeStateProofVerificationReaderWriter(q db.Queryable, e db.Executable) *stateProofVerificationReaderWriter {
+ return &stateProofVerificationReaderWriter{
+ stateProofVerificationReader{q: q},
+ stateProofVerificationWriter{e: e},
+ }
+}
+
+// MakeStateProofVerificationReader returns an SpVerificationCtxReader for use outside of the ledger package.
+func MakeStateProofVerificationReader(q db.Queryable) trackerdb.SpVerificationCtxReader {
+ return makeStateProofVerificationReader(q)
+}
+
+// LookupSPContext retrieves stateproof verification context from the database.
+func (spa *stateProofVerificationReader) LookupSPContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ verificationContext := ledgercore.StateProofVerificationContext{}
+ queryFunc := func() error {
+ row := spa.q.QueryRow("SELECT verificationcontext FROM stateproofverification WHERE lastattestedround=?", stateProofLastAttestedRound)
+ var buf []byte
+ err := row.Scan(&buf)
+ if err != nil {
+ return err
+ }
+ err = protocol.Decode(buf, &verificationContext)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ err := db.Retry(queryFunc)
+ return &verificationContext, err
+}
+
+// DeleteOldSPContexts removes all state proof verification contexts attested earlier than earliestLastAttestedRound from the database.
+func (spa *stateProofVerificationWriter) DeleteOldSPContexts(ctx context.Context, earliestLastAttestedRound basics.Round) error {
+ _, err := spa.e.ExecContext(ctx, "DELETE FROM stateproofverification WHERE lastattestedround < ?", earliestLastAttestedRound)
+ return err
+}
+
+// StoreSPContexts stores the given state proof verification contexts in the database.
+func (spa *stateProofVerificationWriter) StoreSPContexts(ctx context.Context, verificationContext []*ledgercore.StateProofVerificationContext) error {
+ spWriteStmt, err := spa.e.PrepareContext(ctx, "INSERT INTO stateProofVerification(lastattestedround, verificationContext) VALUES(?, ?)")
+ if err != nil {
+ return err
+ }
+ for i := range verificationContext {
+ _, err = spWriteStmt.ExecContext(ctx, verificationContext[i].LastAttestedRound, protocol.Encode(verificationContext[i]))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// StoreSPContextsToCatchpointTbl stores state proof verification contexts in the catchpoint staging table.
+func (spa *stateProofVerificationWriter) StoreSPContextsToCatchpointTbl(ctx context.Context, verificationContexts []ledgercore.StateProofVerificationContext) error {
+ spWriteStmt, err := spa.e.PrepareContext(ctx, "INSERT INTO catchpointstateproofverification(lastattestedround, verificationContext) VALUES(?, ?)")
+ if err != nil {
+ return err
+ }
+
+ for i := range verificationContexts {
+ _, err = spWriteStmt.ExecContext(ctx, verificationContexts[i].LastAttestedRound, protocol.Encode(&verificationContexts[i]))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetAllSPContexts returns all contexts needed to verify state proofs.
+func (spa *stateProofVerificationReader) GetAllSPContexts(ctx context.Context) ([]ledgercore.StateProofVerificationContext, error) {
+ return spa.getAllSPContextsInternal(ctx, "SELECT verificationContext FROM stateProofVerification ORDER BY lastattestedround")
+}
+
+// GetAllSPContextsFromCatchpointTbl returns all state proof verification data from the catchpointStateProofVerification table.
+func (spa *stateProofVerificationReader) GetAllSPContextsFromCatchpointTbl(ctx context.Context) ([]ledgercore.StateProofVerificationContext, error) {
+ return spa.getAllSPContextsInternal(ctx, "SELECT verificationContext FROM catchpointStateProofVerification ORDER BY lastattestedround")
+}
+
+func (spa *stateProofVerificationReader) getAllSPContextsInternal(ctx context.Context, query string) ([]ledgercore.StateProofVerificationContext, error) {
+ var result []ledgercore.StateProofVerificationContext
+ queryFunc := func() error {
+ rows, err := spa.q.QueryContext(ctx, query)
+ if err != nil {
+ return err
+ }
+
+ defer rows.Close()
+
+ // Clear `result` in case this function is retried.
+ result = result[:0]
+ for rows.Next() {
+ var rawData []byte
+ err = rows.Scan(&rawData)
+ if err != nil {
+ return err
+ }
+
+ var record ledgercore.StateProofVerificationContext
+ err = protocol.Decode(rawData, &record)
+ if err != nil {
+ return err
+ }
+
+ result = append(result, record)
+ }
+
+ return nil
+ }
+
+ err := db.Retry(queryFunc)
+ return result, err
+}
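
Each read in the new accessor wraps its query in a closure handed to db.Retry, so a transient SQLite failure re-runs the whole query-and-decode sequence. A condensed, standalone sketch of that shape, with a toy retry helper standing in for util/db.Retry:

    package main

    import (
        "errors"
        "fmt"
    )

    // retry re-runs fn a bounded number of times, like util/db.Retry does for
    // transient database errors (simplified: the real helper is error-aware).
    func retry(fn func() error) error {
        var err error
        for attempt := 0; attempt < 3; attempt++ {
            if err = fn(); err == nil {
                return nil
            }
        }
        return err
    }

    func main() {
        attempts := 0
        var result string
        queryFunc := func() error {
            attempts++
            if attempts < 2 {
                return errors.New("database is locked") // transient failure
            }
            result = "verification context for round 512"
            return nil
        }
        if err := retry(queryFunc); err != nil {
            panic(err)
        }
        fmt.Println(result, "after", attempts, "attempts")
    }
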
diff --git a/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go b/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go
index 3696ed37e..74327be2b 100644
--- a/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go
+++ b/ledger/store/trackerdb/sqlitedriver/store_sqlite_impl.go
@@ -204,6 +204,10 @@ func (txs sqlTransactionScope) MakeEncodedAccoutsBatchIter() trackerdb.EncodedAc
return MakeEncodedAccoutsBatchIter(txs.tx)
}
+func (txs sqlTransactionScope) MakeSpVerificationCtxReaderWriter() trackerdb.SpVerificationCtxReaderWriter {
+ return makeStateProofVerificationReaderWriter(txs.tx, txs.tx)
+}
+
func (txs sqlTransactionScope) RunMigrations(ctx context.Context, params trackerdb.Params, log logging.Logger, targetVersion int32) (mgr trackerdb.InitParams, err error) {
return RunMigrations(ctx, txs.tx, params, log, targetVersion)
}
@@ -263,6 +267,10 @@ func (bs sqlBatchScope) AccountsUpdateSchemaTest(ctx context.Context) (err error
return AccountsUpdateSchemaTest(ctx, bs.tx)
}
+func (bs sqlBatchScope) MakeSpVerificationCtxWriter() trackerdb.SpVerificationCtxWriter {
+ return makeStateProofVerificationWriter(bs.tx)
+}
+
func (ss sqlSnapshotScope) MakeAccountsReader() (trackerdb.AccountsReaderExt, error) {
return NewAccountsSQLReaderWriter(ss.tx), nil
}
@@ -274,3 +282,7 @@ func (ss sqlSnapshotScope) MakeCatchpointReader() (trackerdb.CatchpointReader, e
func (ss sqlSnapshotScope) MakeCatchpointPendingHashesIterator(hashCount int) trackerdb.CatchpointPendingHashesIter {
return MakeCatchpointPendingHashesIterator(hashCount, ss.tx)
}
+
+func (ss sqlSnapshotScope) MakeSpVerificationCtxReader() trackerdb.SpVerificationCtxReader {
+ return makeStateProofVerificationReader(ss.tx)
+}
diff --git a/ledger/store/trackerdb/sqlitedriver/testing.go b/ledger/store/trackerdb/sqlitedriver/testing.go
index 2a96a661b..8d0a61afd 100644
--- a/ledger/store/trackerdb/sqlitedriver/testing.go
+++ b/ledger/store/trackerdb/sqlitedriver/testing.go
@@ -103,6 +103,9 @@ func AccountsInitTest(tb testing.TB, tx *sql.Tx, initAccounts map[basics.Address
err = accountsCreateBoxTable(context.Background(), tx)
require.NoError(tb, err)
+ err = performKVStoreNullBlobConversion(context.Background(), tx)
+ require.NoError(tb, err)
+
return newDB
}
@@ -125,5 +128,8 @@ func AccountsUpdateSchemaTest(ctx context.Context, tx *sql.Tx) (err error) {
if err := accountsCreateBoxTable(ctx, tx); err != nil {
return err
}
+ if err := createStateProofVerificationTable(ctx, tx); err != nil {
+ return err
+ }
return nil
}
diff --git a/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go b/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go
index 358d33d2d..2c7a370f0 100644
--- a/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go
+++ b/ledger/store/trackerdb/sqlitedriver/trackerdbV2.go
@@ -131,6 +131,12 @@ func RunMigrations(ctx context.Context, tx *sql.Tx, params trackerdb.Params, log
tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 8 : %v", err)
return
}
+ case 9:
+ err = tu.upgradeDatabaseSchema9(ctx, tx)
+ if err != nil {
+ tu.log.Warnf("trackerDBInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 9 : %v", err)
+ return
+ }
default:
return trackerdb.InitParams{}, fmt.Errorf("trackerDBInitialize unable to upgrade database from schema version %d", tu.schemaVersion)
}
@@ -479,6 +485,29 @@ func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema8(ctx context.Context
return tu.setVersion(ctx, tx, 9)
}
+// upgradeDatabaseSchema9 upgrades the database schema from version 9 to version 10:
+// it adds a new stateproofverification table, scrubs all NULL values out of the
+// kvstore table (replacing them with empty byte slices), and creates the onlineaccounts votelastvalid index.
+func (tu *trackerDBSchemaInitializer) upgradeDatabaseSchema9(ctx context.Context, tx *sql.Tx) (err error) {
+ err = createStateProofVerificationTable(ctx, tx)
+ if err != nil {
+ return err
+ }
+
+ err = performKVStoreNullBlobConversion(ctx, tx)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema9 unable to replace kvstore nil entries with empty byte slices : %v", err)
+ }
+
+ err = convertOnlineRoundParamsTail(ctx, tx)
+ if err != nil {
+ return fmt.Errorf("upgradeDatabaseSchema10 unable to convert onlineroundparamstail: %v", err)
+ }
+
+ // update version
+ return tu.setVersion(ctx, tx, 10)
+}
+
func removeEmptyDirsOnSchemaUpgrade(dbDirectory string) (err error) {
catchpointRootDir := filepath.Join(dbDirectory, trackerdb.CatchpointDirName)
if _, err := os.Stat(catchpointRootDir); os.IsNotExist(err) {
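
The upgrade machinery is a simple dispatch loop: each upgradeDatabaseSchemaN does its work and advances the stored version by exactly one, and RunMigrations keeps dispatching until the target version is reached. A toy sketch of that control flow, where the printed steps summarize what upgradeDatabaseSchema9 performs:

    package main

    import "fmt"

    func main() {
        version, target := int32(9), int32(10)
        // Each step upgrades exactly one version; the driver keeps dispatching
        // until the target is reached, mirroring the switch above.
        upgrades := map[int32]func() (int32, error){
            9: func() (int32, error) {
                fmt.Println("create stateproofverification table")
                fmt.Println("replace kvstore NULL blobs with empty byte slices")
                fmt.Println("create onlineaccounts votelastvalid index")
                return 10, nil
            },
        }
        for version < target {
            up, ok := upgrades[version]
            if !ok {
                panic(fmt.Sprintf("unable to upgrade database from schema version %d", version))
            }
            next, err := up()
            if err != nil {
                panic(err)
            }
            version = next
        }
        fmt.Println("schema now at version", version)
    }
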
diff --git a/ledger/store/trackerdb/store.go b/ledger/store/trackerdb/store.go
index 879f0cf95..17e0e720a 100644
--- a/ledger/store/trackerdb/store.go
+++ b/ledger/store/trackerdb/store.go
@@ -31,6 +31,7 @@ type BatchScope interface {
MakeAccountsOptimizedWriter(hasAccounts, hasResources, hasKvPairs, hasCreatables bool) (AccountsWriter, error)
ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error)
Testing() TestBatchScope
+ MakeSpVerificationCtxWriter() SpVerificationCtxWriter
}
// SnapshotScope is the read scope to the store.
@@ -38,6 +39,8 @@ type SnapshotScope interface {
MakeAccountsReader() (AccountsReaderExt, error)
MakeCatchpointReader() (CatchpointReader, error)
MakeCatchpointPendingHashesIterator(hashCount int) CatchpointPendingHashesIter
+
+ MakeSpVerificationCtxReader() SpVerificationCtxReader
}
// TransactionScope is the read/write scope to the store.
@@ -53,6 +56,7 @@ type TransactionScope interface {
RunMigrations(ctx context.Context, params Params, log logging.Logger, targetVersion int32) (mgr InitParams, err error)
ResetTransactionWarnDeadline(ctx context.Context, deadline time.Time) (prevDeadline time.Time, err error)
Testing() TestTransactionScope
+ MakeSpVerificationCtxReaderWriter() SpVerificationCtxReaderWriter
}
// BatchFn is the callback lambda used in `Batch`.
diff --git a/ledger/store/trackerdb/version.go b/ledger/store/trackerdb/version.go
index 42917f262..e619d2dd5 100644
--- a/ledger/store/trackerdb/version.go
+++ b/ledger/store/trackerdb/version.go
@@ -19,4 +19,4 @@ package trackerdb
// AccountDBVersion is the database version that this binary would know how to support and how to upgrade to.
// details about the content of each of the versions can be found in the upgrade functions upgradeDatabaseSchemaXXXX
// and their descriptions.
-var AccountDBVersion = int32(9)
+var AccountDBVersion = int32(10)
diff --git a/ledger/testing/consensusRange.go b/ledger/testing/consensusRange.go
index 02ae83fce..019b50271 100644
--- a/ledger/testing/consensusRange.go
+++ b/ledger/testing/consensusRange.go
@@ -47,9 +47,9 @@ var consensusByNumber = []protocol.ConsensusVersion{
protocol.ConsensusV23,
protocol.ConsensusV24, // AVM v2 (apps)
protocol.ConsensusV25,
- protocol.ConsensusV26,
+ protocol.ConsensusV26, // AVM v3
protocol.ConsensusV27,
- protocol.ConsensusV28,
+ protocol.ConsensusV28, // AVM v4 (direct refs)
protocol.ConsensusV29,
protocol.ConsensusV30, // AVM v5 (inner txs)
protocol.ConsensusV31, // AVM v6 (inner txs with appls)
@@ -57,7 +57,9 @@ var consensusByNumber = []protocol.ConsensusVersion{
protocol.ConsensusV33, // 320 rounds
protocol.ConsensusV34, // AVM v7, stateproofs
protocol.ConsensusV35, // minor, double upgrade with v34
- protocol.ConsensusV36, // box storage
+ protocol.ConsensusV36, // AVM v8, box storage
+ protocol.ConsensusV37,
+ protocol.ConsensusV38, // AVM v9, ECDSA pre-check, stateproofs recoverability
protocol.ConsensusFuture,
}
diff --git a/ledger/testing/consensusRange_test.go b/ledger/testing/consensusRange_test.go
index cd5baaa81..26e042094 100644
--- a/ledger/testing/consensusRange_test.go
+++ b/ledger/testing/consensusRange_test.go
@@ -56,6 +56,6 @@ func TestReleasedVersion(t *testing.T) {
}
require.Equal(t, versionStringFromIndex(len(consensusByNumber)-1), "vFuture")
- require.Equal(t, versionStringFromIndex(36), "v36")
+ require.Equal(t, versionStringFromIndex(38), "v38")
}
diff --git a/ledger/testing/initState.go b/ledger/testing/initState.go
index 00cbe0d1b..374c1d6c4 100644
--- a/ledger/testing/initState.go
+++ b/ledger/testing/initState.go
@@ -21,7 +21,6 @@ import (
"github.com/stretchr/testify/require"
- "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
basics_testing "github.com/algorand/go-algorand/data/basics/testing"
@@ -46,7 +45,6 @@ func init() {
// GenerateInitState generates testing init state
func GenerateInitState(tb testing.TB, proto protocol.ConsensusVersion, baseAlgoPerAccount int) (genesisInitState ledgercore.InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) {
- params := config.Consensus[proto]
poolAddr := testPoolAddr
sinkAddr := testSinkAddr
@@ -76,40 +74,19 @@ func GenerateInitState(tb testing.TB, proto protocol.ConsensusVersion, baseAlgoP
initKeys[sinkAddr] = sinkSecret
initAccounts[sinkAddr] = basics_testing.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 7654321})
- incentivePoolBalanceAtGenesis := initAccounts[poolAddr].MicroAlgos
- var initialRewardsPerRound uint64
- if params.InitialRewardsRateCalculation {
- initialRewardsPerRound = basics.SubSaturate(incentivePoolBalanceAtGenesis.Raw, params.MinBalance) / uint64(params.RewardsRateRefreshInterval)
- } else {
- initialRewardsPerRound = incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval)
- }
+ genesisBalances := bookkeeping.MakeTimestampedGenesisBalances(initAccounts, sinkAddr, poolAddr, 0)
+ genesisID := tb.Name()
+ genesisHash := crypto.Hash([]byte(genesisID))
- initBlock := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- GenesisID: tb.Name(),
- Round: 0,
- RewardsState: bookkeeping.RewardsState{
- RewardsRate: initialRewardsPerRound,
- RewardsPool: poolAddr,
- FeeSink: sinkAddr,
- },
- UpgradeState: bookkeeping.UpgradeState{
- CurrentProtocol: proto,
- },
- },
- }
+ initBlock, err := bookkeeping.MakeGenesisBlock(proto, genesisBalances, genesisID, genesisHash)
+ require.NoError(tb, err)
- var err error
initBlock.TxnCommitments, err = initBlock.PaysetCommit()
require.NoError(tb, err)
- if params.SupportGenesisHash {
- initBlock.BlockHeader.GenesisHash = crypto.Hash([]byte(tb.Name()))
- }
-
genesisInitState.Block = initBlock
genesisInitState.Accounts = initAccounts
- genesisInitState.GenesisHash = crypto.Hash([]byte(tb.Name()))
+ genesisInitState.GenesisHash = genesisHash
return
}
diff --git a/ledger/testing/testGenesis.go b/ledger/testing/testGenesis.go
index 80d752c75..98a41d06d 100644
--- a/ledger/testing/testGenesis.go
+++ b/ledger/testing/testGenesis.go
@@ -25,11 +25,29 @@ import (
"github.com/algorand/go-algorand/protocol"
)
+// testGenesisCfg provides a configuration object for NewTestGenesis.
+type testGenesisCfg struct {
+ rewardsPoolAmount basics.MicroAlgos
+}
+
+// TestGenesisOption provides functional options for testGenesisCfg.
+type TestGenesisOption func(*testGenesisCfg)
+
+// TestGenesisRewardsPoolSize configures the rewards pool size in the genesis block.
+func TestGenesisRewardsPoolSize(amount basics.MicroAlgos) TestGenesisOption {
+ return func(cfg *testGenesisCfg) { cfg.rewardsPoolAmount = amount }
+}
+
// NewTestGenesis creates a bunch of accounts, splits up 10B algos
// between them and the rewardspool and feesink, and gives out the
// addresses and secrets it creates to enable tests. For special
// scenarios, manipulate these return values before using newTestLedger.
-func NewTestGenesis() (bookkeeping.GenesisBalances, []basics.Address, []*crypto.SignatureSecrets) {
+func NewTestGenesis(opts ...TestGenesisOption) (bookkeeping.GenesisBalances, []basics.Address, []*crypto.SignatureSecrets) {
+ var cfg testGenesisCfg
+ for _, opt := range opts {
+ opt(&cfg)
+ }
+
// irrelevant, but deterministic
sink, err := basics.UnmarshalChecksumAddress("YTPRLJ2KK2JRFSZZNAF57F3K5Y2KCG36FZ5OSYLW776JJGAUW5JXJBBD7Q")
if err != nil {
@@ -66,8 +84,12 @@ func NewTestGenesis() (bookkeeping.GenesisBalances, []basics.Address, []*crypto.
Status: basics.NotParticipating,
}
+ poolBal := basics.MicroAlgos{Raw: amount}
+ if cfg.rewardsPoolAmount.Raw > 0 {
+ poolBal = cfg.rewardsPoolAmount
+ }
accts[rewards] = basics.AccountData{
- MicroAlgos: basics.MicroAlgos{Raw: amount},
+ MicroAlgos: poolBal,
}
genBalances := bookkeeping.MakeGenesisBalances(accts, sink, rewards)
diff --git a/ledger/tracker.go b/ledger/tracker.go
index 5be0acd29..39ea9d4b1 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -28,7 +28,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store/trackerdb"
"github.com/algorand/go-algorand/logging"
@@ -118,10 +118,11 @@ type ledgerTracker interface {
// An optional context is provided for long-running operations.
postCommitUnlocked(context.Context, *deferredCommitContext)
- // handleUnorderedCommit is a special method for handling deferred commits that are out of order.
+ // handleUnorderedCommitOrError is a special method for handling deferred commits that are out of order,
+ // or errors reported by other trackers while committing a batch.
// Tracker might update own state in this case. For example, account updates tracker cancels
- // scheduled catchpoint writing that deferred commit.
- handleUnorderedCommit(*deferredCommitContext)
+ // the scheduled catchpoint writing flag for this batch.
+ handleUnorderedCommitOrError(*deferredCommitContext)
// close terminates the tracker, reclaiming any resources
// like open database connections or goroutines. close may
@@ -136,7 +137,7 @@ type ledgerForTracker interface {
trackerDB() trackerdb.TrackerStore
blockDB() db.Pair
trackerLog() logging.Logger
- trackerEvalVerified(bookkeeping.Block, internal.LedgerForEvaluator) (ledgercore.StateDelta, error)
+ trackerEvalVerified(bookkeeping.Block, eval.LedgerForEvaluator) (ledgercore.StateDelta, error)
Latest() basics.Round
Block(basics.Round) (bookkeeping.Block, error)
@@ -214,12 +215,6 @@ type deferredCommitRange struct {
// a catchpoint data file, in this commit cycle iteration.
catchpointFirstStage bool
- // catchpointDataWriting is a pointer to a variable with the same name in the
- // catchpointTracker. It's used in order to reset the catchpointDataWriting flag from
- // the acctupdates's prepareCommit/commitRound (which is called before the
- // corresponding catchpoint tracker method.
- catchpointDataWriting *int32
-
// enableGeneratingCatchpointFiles controls whether the node produces catchpoint files or not.
enableGeneratingCatchpointFiles bool
@@ -269,6 +264,15 @@ type deferredCommitContext struct {
stats telemetryspec.AccountsUpdateMetrics
updateStats bool
+
+ spVerification struct {
+ // state proof verification deletion information
+ lastDeleteIndex int
+ earliestLastAttestedRound basics.Round
+
+ // state proof verification commit information
+ commitContext []verificationCommitContext
+ }
}
func (dcc deferredCommitContext) newBase() basics.Round {
@@ -402,14 +406,40 @@ func (tr *trackerRegistry) scheduleCommit(blockqRound, maxLookback basics.Round)
// ( unless we're creating a catchpoint, in which case we want to flush it right away
// so that all the instances of the catchpoint would contain exactly the same data )
flushTime := time.Now()
- if dcc != nil && !flushTime.After(tr.lastFlushTime.Add(balancesFlushInterval)) && !dcc.catchpointFirstStage && !dcc.catchpointSecondStage && dcc.pendingDeltas < pendingDeltasFlushThreshold {
- dcc = nil
+
+ // Some tracker wants to flush.
+ if dcc != nil {
+ // skip this flush if none of these conditions is met:
+ // - has it been at least balancesFlushInterval since the last flush?
+ flushIntervalPassed := flushTime.After(tr.lastFlushTime.Add(balancesFlushInterval))
+ // - does this commit task also include catchpoint file creation activity for the dcc.oldBase+dcc.offset?
+ flushForCatchpoint := dcc.catchpointFirstStage || dcc.catchpointSecondStage
+ // - have more than pendingDeltasFlushThreshold accounts been modified since the last flush?
+ flushAccounts := dcc.pendingDeltas >= pendingDeltasFlushThreshold
+ if !(flushIntervalPassed || flushForCatchpoint || flushAccounts) {
+ dcc = nil
+ }
}
tr.mu.RUnlock()
if dcc != nil {
+ // Increment the waitgroup first, otherwise this goroutine can be interrupted
+ // and commitSyncer would call Done() on an empty wait group.
tr.accountsWriting.Add(1)
- tr.deferredCommits <- dcc
+ select {
+ case tr.deferredCommits <- dcc:
+ default:
+ // Do NOT block if deferredCommits cannot accept this task; skip it instead.
+ // Note: the next attempt will include these rounds plus some extra rounds.
+ // The main reason for slow commits is catchpoint file creation (when commitSyncer calls
+ // commitRound, which calls postCommitUnlocked). This producer thread is called by
+ // blockQueue.syncer() upon successful block DB flush, which calls ledger.notifyCommit()
+ // and trackerRegistry.committedUpTo() after taking the trackerMu.Lock().
+ // This means a blocking write to deferredCommits will block Ledger reads (TODO use more fine-grained locks).
+ // Dropping this dcc allows the blockqueue syncer to continue persisting other blocks
+ // and ledger reads to proceed without being blocked by trackerMu lock.
+ tr.accountsWriting.Done()
+ }
}
}
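
The producer side above pairs a WaitGroup increment with a non-blocking channel send: Add(1) happens before the send attempt, and a full queue drops the task and immediately balances the WaitGroup instead of stalling the block queue behind trackerMu. A self-contained sketch of that pairing:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var writing sync.WaitGroup
        commits := make(chan int, 1) // small buffer, like deferredCommits

        enqueue := func(task int) bool {
            writing.Add(1) // increment first so the consumer's Done() never underflows
            select {
            case commits <- task:
                return true
            default:
                writing.Done() // queue full: drop the task and undo the Add
                return false
            }
        }

        fmt.Println(enqueue(1)) // true: the buffered slot was free
        fmt.Println(enqueue(2)) // false: queue full, task dropped, no blocking

        go func() {
            for task := range commits {
                fmt.Println("committing", task)
                writing.Done()
            }
        }()
        writing.Wait() // returns once the surviving task is committed
        close(commits)
    }
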
@@ -479,7 +509,7 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error {
if tr.dbRound < dbRound || offset < uint64(tr.dbRound-dbRound) {
tr.log.Warnf("out of order deferred commit: offset %d, dbRound %d but current tracker DB round is %d", offset, dbRound, tr.dbRound)
for _, lt := range tr.trackers {
- lt.handleUnorderedCommit(dcc)
+ lt.handleUnorderedCommitOrError(dcc)
}
tr.mu.RUnlock()
return nil
@@ -503,19 +533,27 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error {
dcc.oldBase = dbRound
dcc.flushTime = time.Now()
+ var err error
for _, lt := range tr.trackers {
- err := lt.prepareCommit(dcc)
+ err = lt.prepareCommit(dcc)
if err != nil {
tr.log.Errorf(err.Error())
- tr.mu.RUnlock()
- return err
+ break
}
}
+ if err != nil {
+ for _, lt := range tr.trackers {
+ lt.handleUnorderedCommitOrError(dcc)
+ }
+ tr.mu.RUnlock()
+ return err
+ }
+
tr.mu.RUnlock()
start := time.Now()
ledgerCommitroundCount.Inc(nil)
- err := tr.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
+ err = tr.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) {
arw, err := tx.MakeAccountsReaderWriter()
if err != nil {
return err
@@ -533,6 +571,9 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error {
ledgerCommitroundMicros.AddMicrosecondsSince(start, nil)
if err != nil {
+ for _, lt := range tr.trackers {
+ lt.handleUnorderedCommitOrError(dcc)
+ }
tr.log.Warnf("unable to advance tracker db snapshot (%d-%d): %v", dbRound, dbRound+basics.Round(offset), err)
return err
}
@@ -692,8 +733,8 @@ func (tr *trackerRegistry) replay(l ledgerForTracker) (err error) {
roundsBehind = blk.Round() - tr.dbRound
tr.mu.RUnlock()
- // are we too far behind ? ( taking into consideration the catchpoint writing, which can stall the writing for quite a bit )
- if roundsBehind > initializeCachesRoundFlushInterval+basics.Round(catchpointInterval) {
+ // are we farther behind than we need to be? Consider: catchpoint interval, flush interval and max acct lookback.
+ if roundsBehind > basics.Round(maxAcctLookback) && roundsBehind > initializeCachesRoundFlushInterval+basics.Round(catchpointInterval) {
// we're unable to persist changes. This is unexpected, but there is no point in keep trying batching additional changes since any further changes
// would just accumulate in memory.
close(blockEvalFailed)
diff --git a/ledger/tracker_test.go b/ledger/tracker_test.go
index 1671e0302..09c7401b8 100644
--- a/ledger/tracker_test.go
+++ b/ledger/tracker_test.go
@@ -191,8 +191,8 @@ func (bt *producePrepareBlockingTracker) postCommit(ctx context.Context, dcc *de
func (bt *producePrepareBlockingTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
}
-// handleUnorderedCommit is not used by the blockingTracker
-func (bt *producePrepareBlockingTracker) handleUnorderedCommit(*deferredCommitContext) {
+// handleUnorderedCommitOrError is not used by the blockingTracker
+func (bt *producePrepareBlockingTracker) handleUnorderedCommitOrError(*deferredCommitContext) {
}
// close is not used by the blockingTracker
diff --git a/ledger/txtail.go b/ledger/txtail.go
index a4095c644..a86a8af5b 100644
--- a/ledger/txtail.go
+++ b/ledger/txtail.go
@@ -313,7 +313,7 @@ func (t *txTail) postCommit(ctx context.Context, dcc *deferredCommitContext) {
func (t *txTail) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) {
}
-func (t *txTail) handleUnorderedCommit(*deferredCommitContext) {
+func (t *txTail) handleUnorderedCommitOrError(*deferredCommitContext) {
}
func (t *txTail) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange {
diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go
index ba4a09755..a21af5120 100644
--- a/ledger/txtail_test.go
+++ b/ledger/txtail_test.go
@@ -19,7 +19,6 @@ package ledger
import (
"context"
"errors"
- "fmt"
"testing"
"github.com/stretchr/testify/require"
@@ -259,7 +258,6 @@ func TestTxTailDeltaTracking(t *testing.T) {
err := txtail.loadFromDisk(&ledger, ledger.Latest())
require.NoError(t, err)
- fmt.Printf("%d, %s\n", len(txtail.recent), protoVersion)
require.Equal(t, int(config.Consensus[protoVersion].MaxTxnLife), len(txtail.recent))
require.Equal(t, testTxTailValidityRange, len(txtail.lastValid))
require.Equal(t, ledger.Latest(), txtail.lowWaterMark)
diff --git a/ledger/voters.go b/ledger/voters.go
index 1648e707f..d5645ede6 100644
--- a/ledger/voters.go
+++ b/ledger/voters.go
@@ -20,15 +20,30 @@ import (
"fmt"
"sync"
- "github.com/algorand/go-algorand/stateproof"
+ "github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/stateproof"
)
+// votersFetcher provides safe access to the ledger while creating the state proof builder. Since the operation
+// runs under the ledger's commit operation, this implementation guarantees lock-free access to the VotersForStateProof function.
+type votersFetcher struct {
+ vt *votersTracker
+}
+
+func (vf *votersFetcher) VotersForStateProof(rnd basics.Round) (*ledgercore.VotersForRound, error) {
+ return vf.vt.VotersForStateProof(rnd)
+}
+
+func (vf *votersFetcher) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) {
+ return vf.vt.l.BlockHdr(rnd)
+}
+
// The votersTracker maintains the vector commitment for the most recent
// commitments to online accounts for state proofs.
//
@@ -62,14 +77,18 @@ type votersTracker struct {
// Thus, we maintain X in the votersForRoundCache map until we form a stateproof
// for round X+StateProofVotersLookback+StateProofInterval.
//
- // In case state proof chain stalls this map would be bounded to StateProofMaxRecoveryIntervals + 3
+ // In case the state proof chain stalls, this map would be bounded to StateProofMaxRecoveryIntervals + 3 with respect
+ // to the db round.
// + 1 - since votersForRoundCache needs to contain an entry for a future state proof
// + 1 - since votersForRoundCache needs to contain an entry to verify the earliest state proof
// in the recovery interval. i.e. it needs to have an entry for R-StateProofMaxRecoveryIntervals-StateProofInterval
// to verify R-StateProofMaxRecoveryIntervals
// + 1 would only appear if the sampled round R is: interval - lookback < R < interval.
// in this case, the tracker would not yet remove the old one but will create a new one for future state proof.
+ // Additionally, the tracker would contain an entry for every state proof interval between the latest round in the
+ // ledger and the db round.
votersForRoundCache map[basics.Round]*ledgercore.VotersForRound
+ votersMu deadlock.RWMutex
l ledgerForTracker
onlineAccountsFetcher ledgercore.OnlineAccountsFetcher
@@ -77,6 +96,11 @@ type votersTracker struct {
// loadWaitGroup synchronizing the completion of the loadTree call so that we can
// shutdown the tracker without leaving any running go-routines.
loadWaitGroup sync.WaitGroup
+
+ // commitListener provides a callback to call on each prepare commit. This callback receives access to the voters
+ // cache.
+ commitListener ledgercore.VotersCommitListener
+ commitListenerMu deadlock.RWMutex
}
// votersRoundForStateProofRound computes the round number whose voting participants
@@ -90,9 +114,11 @@ func votersRoundForStateProofRound(stateProofRnd basics.Round, proto *config.Con
}
func (vt *votersTracker) loadFromDisk(l ledgerForTracker, fetcher ledgercore.OnlineAccountsFetcher, latestDbRound basics.Round) error {
+ vt.votersMu.Lock()
vt.l = l
- vt.votersForRoundCache = make(map[basics.Round]*ledgercore.VotersForRound)
vt.onlineAccountsFetcher = fetcher
+ vt.votersForRoundCache = make(map[basics.Round]*ledgercore.VotersForRound)
+ vt.votersMu.Unlock()
latestRoundInLedger := l.Latest()
hdr, err := l.BlockHdr(latestRoundInLedger)
@@ -132,8 +158,8 @@ func (vt *votersTracker) loadFromDisk(l ledgerForTracker, fetcher ledgercore.Onl
func (vt *votersTracker) loadTree(hdr bookkeeping.BlockHeader) {
r := hdr.Round
- _, ok := vt.votersForRoundCache[r]
- if ok {
+ _, exists := vt.getVoters(r)
+ if exists {
// Already loaded.
return
}
@@ -147,7 +173,7 @@ func (vt *votersTracker) loadTree(hdr bookkeeping.BlockHeader) {
tr := ledgercore.MakeVotersForRound()
tr.Proto = proto
- vt.votersForRoundCache[r] = tr
+ vt.setVoters(r, tr)
vt.loadWaitGroup.Add(1)
go func() {
@@ -174,18 +200,16 @@ func (vt *votersTracker) newBlock(hdr bookkeeping.BlockHeader) {
return
}
- vt.removeOldVoters(hdr)
-
// This might be a block where we snapshot the online participants,
// to eventually construct a vector commitment in a later
// block.
- r := uint64(hdr.Round)
- if (r+proto.StateProofVotersLookback)%proto.StateProofInterval != 0 {
+ r := hdr.Round
+ if (uint64(r)+proto.StateProofVotersLookback)%proto.StateProofInterval != 0 {
return
}
- _, ok := vt.votersForRoundCache[basics.Round(r)]
- if ok {
+ _, exists := vt.getVoters(r)
+ if exists {
vt.l.trackerLog().Errorf("votersTracker.newBlock: round %d already present", r)
} else {
vt.loadTree(hdr)
@@ -193,20 +217,49 @@ func (vt *votersTracker) newBlock(hdr bookkeeping.BlockHeader) {
}
+func (vt *votersTracker) prepareCommit(dcc *deferredCommitContext) error {
+ vt.commitListenerMu.RLock()
+ defer vt.commitListenerMu.RUnlock()
+
+ if vt.commitListener == nil {
+ return nil
+ }
+
+ commitListener := vt.commitListener
+ vf := votersFetcher{vt: vt}
+ // In case the listener's function fails, we do not want to break the commit process.
+ // For that reason, the OnPrepareVoterCommit function deliberately has no return value.
+ commitListener.OnPrepareVoterCommit(dcc.oldBase, dcc.newBase(), &vf)
+
+ return nil
+}
+
+func (vt *votersTracker) postCommit(dcc *deferredCommitContext) {
+ lastHeaderCommitted, err := vt.l.BlockHdr(dcc.newBase())
+ if err != nil {
+ vt.l.trackerLog().Errorf("votersTracker.postCommit: could not retrieve header for round %d: %v", dcc.newBase(), err)
+ return
+ }
+
+ // Voters older than lastHeaderCommitted.Round() - StateProofMaxRecoveryIntervals * StateProofInterval are
+ // guaranteed to be removed here.
+ vt.removeOldVoters(lastHeaderCommitted)
+}
+
// removeOldVoters removes voters data form the tracker and allows the database to commit previous rounds.
// voters would be removed if one of the two conditions is met
// 1 - Voters are for a round which was already been confirmed by stateproof
// 2 - Voters are for a round which is older than the allowed recovery interval.
-// notice that if state proof chain is delayed, votersForRoundCache will not be larger than
-// StateProofMaxRecoveryIntervals + 1
-// ( In order to be able to build and verify X stateproofs back we need X + 1 voters data )
//
// It is possible to optimize this function and not to traverse votersForRoundCache on every round.
-// Since the map is small (Usually 0 - 2 elements and up to StateProofMaxRecoveryIntervals) we decided to keep the code simple
+// Since the map is small (usually 0 - 2 elements) we decided to keep the code simple
// and check for deletion in every round.
func (vt *votersTracker) removeOldVoters(hdr bookkeeping.BlockHeader) {
lowestStateProofRound := stateproof.GetOldestExpectedStateProof(&hdr)
+ vt.votersMu.Lock()
+ defer vt.votersMu.Unlock()
+
for r, tr := range vt.votersForRoundCache {
commitRound := r + basics.Round(tr.Proto.StateProofVotersLookback)
stateProofRound := commitRound + basics.Round(tr.Proto.StateProofInterval)
@@ -225,18 +278,23 @@ func (vt *votersTracker) removeOldVoters(hdr bookkeeping.BlockHeader) {
// not need any blocks, it returns base.
func (vt *votersTracker) lowestRound(base basics.Round) basics.Round {
minRound := base
+
+ vt.votersMu.RLock()
+ defer vt.votersMu.RUnlock()
+
for r := range vt.votersForRoundCache {
if r < minRound {
minRound = r
}
}
+
return minRound
}
-// getVoters() returns the top online participants from round r.
-func (vt *votersTracker) getVoters(r basics.Round) (*ledgercore.VotersForRound, error) {
- tr, ok := vt.votersForRoundCache[r]
- if !ok {
+// VotersForStateProof returns the top online participants from round r.
+func (vt *votersTracker) VotersForStateProof(r basics.Round) (*ledgercore.VotersForRound, error) {
+ tr, exists := vt.getVoters(r)
+ if !exists {
// Not tracked: stateproofs not enabled.
return nil, nil
}
@@ -249,3 +307,35 @@ func (vt *votersTracker) getVoters(r basics.Round) (*ledgercore.VotersForRound,
return tr, nil
}
+
+func (vt *votersTracker) registerPrepareCommitListener(commitListener ledgercore.VotersCommitListener) {
+ vt.commitListenerMu.Lock()
+ defer vt.commitListenerMu.Unlock()
+
+ if vt.commitListener != nil {
+ vt.l.trackerLog().Error("votersTracker.registerPrepareCommitListener: overriding existing listener.")
+ }
+ vt.commitListener = commitListener
+}
+
+func (vt *votersTracker) unregisterPrepareCommitListener() {
+ vt.commitListenerMu.Lock()
+ defer vt.commitListenerMu.Unlock()
+
+ vt.commitListener = nil
+}
+
+func (vt *votersTracker) getVoters(round basics.Round) (*ledgercore.VotersForRound, bool) {
+ vt.votersMu.RLock()
+ defer vt.votersMu.RUnlock()
+
+ tr, ok := vt.votersForRoundCache[round]
+ return tr, ok
+}
+
+func (vt *votersTracker) setVoters(round basics.Round, voters *ledgercore.VotersForRound) {
+ vt.votersMu.Lock()
+ defer vt.votersMu.Unlock()
+
+ vt.votersForRoundCache[round] = voters
+}
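
To see how the new listener hooks are meant to be driven, here is a self-contained analog of registration plus prepareCommit dispatch; the real ledgercore.VotersCommitListener callback also receives a voters fetcher, which is omitted here for brevity:

    package main

    import (
        "fmt"
        "sync"
    )

    type commitListener interface {
        OnPrepareVoterCommit(oldBase, newBase uint64)
    }

    type tracker struct {
        mu       sync.RWMutex
        listener commitListener
    }

    func (t *tracker) register(l commitListener) {
        t.mu.Lock()
        defer t.mu.Unlock()
        if t.listener != nil {
            fmt.Println("warning: overriding existing listener")
        }
        t.listener = l
    }

    func (t *tracker) prepareCommit(oldBase, newBase uint64) {
        t.mu.RLock()
        defer t.mu.RUnlock()
        if t.listener == nil {
            return
        }
        // No return value: a failing listener must not break the commit.
        t.listener.OnPrepareVoterCommit(oldBase, newBase)
    }

    type printListener struct{}

    func (printListener) OnPrepareVoterCommit(oldBase, newBase uint64) {
        fmt.Printf("committing rounds (%d, %d]\n", oldBase, newBase)
    }

    func main() {
        var t tracker
        t.register(printListener{})
        t.prepareCommit(100, 164)
    }
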
diff --git a/ledger/voters_test.go b/ledger/voters_test.go
index 892d3679c..190997c92 100644
--- a/ledger/voters_test.go
+++ b/ledger/voters_test.go
@@ -31,12 +31,37 @@ import (
"github.com/stretchr/testify/require"
)
-func addBlockToAccountsUpdate(blk bookkeeping.Block, ao *onlineAccounts, totals ledgercore.AccountTotals) {
+func addBlockToAccountsUpdate(t *testing.T, blk bookkeeping.Block, ml *mockLedgerForTracker) {
updates := ledgercore.MakeAccountDeltas(1)
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
delta.Accts.MergeAccounts(updates)
+ _, totals, err := ml.trackers.accts.LatestTotals()
+ require.NoError(t, err)
delta.Totals = totals
- ao.newBlock(blk, delta)
+ ml.addBlock(blockEntry{block: blk}, delta)
+}
+
+func addRandomBlock(t *testing.T, ml *mockLedgerForTracker) {
+ block := randomBlock(ml.Latest() + 1)
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ addBlockToAccountsUpdate(t, block.block, ml)
+}
+
+func commitStateProofBlock(t *testing.T, ml *mockLedgerForTracker, stateProofNextRound basics.Round) {
+ var stateTracking bookkeeping.StateProofTrackingData
+ block := randomBlock(ml.Latest() + 1)
+ block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ stateTracking.StateProofNextRound = stateProofNextRound
+ block.block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
+ block.block.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateTracking
+
+ addBlockToAccountsUpdate(t, block.block, ml)
+ commitAll(t, ml)
+}
+
+func commitAll(t *testing.T, ml *mockLedgerForTracker) {
+ dcc := commitSyncPartial(t, ml.trackers.acctsOnline, ml, ml.Latest())
+ commitSyncPartialComplete(t, ml.trackers.acctsOnline, ml, dcc)
}
func checkVoters(a *require.Assertions, ao *onlineAccounts, expectedSize uint64) {
@@ -95,34 +120,23 @@ func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
defer ml.Close()
conf := config.GetDefaultLocal()
+ // Set MaxAcctLookback to 0 so that all blocks are committed, for easier processing by the voters tracker.
+ conf.MaxAcctLookback = 0
au, ao := newAcctUpdates(t, ml, conf)
defer au.close()
defer ao.close()
- _, totals, err := au.LatestTotals()
- require.NoError(t, err)
-
i := uint64(1)
// adding blocks to the voterstracker (in order to pass the numOfIntervals*stateproofInterval we add 1)
for ; i < (numOfIntervals*intervalForTest)+1; i++ {
- block := randomBlock(basics.Round(i))
- block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao, totals)
+ addRandomBlock(t, ml)
}
checkVoters(a, ao, numOfIntervals)
a.Equal(basics.Round(intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
- block := randomBlock(basics.Round(i))
- i++
- block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
-
// committing stateproof that confirm the (numOfIntervals - 1)th interval
- var stateTracking bookkeeping.StateProofTrackingData
- stateTracking.StateProofNextRound = basics.Round((numOfIntervals - 1) * intervalForTest)
- block.block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
- block.block.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateTracking
- addBlockToAccountsUpdate(block.block, ao, totals)
+ commitStateProofBlock(t, ml, basics.Round((numOfIntervals-1)*intervalForTest))
// the tracker should have 3 entries
// - voters to confirm the numOfIntervals - 1 th interval
@@ -131,12 +145,7 @@ func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
checkVoters(a, ao, 3)
a.Equal(basics.Round((numOfIntervals-2)*intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
- block = randomBlock(basics.Round(i))
- block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- stateTracking.StateProofNextRound = basics.Round(numOfIntervals * intervalForTest)
- block.block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
- block.block.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateTracking
- addBlockToAccountsUpdate(block.block, ao, totals)
+ commitStateProofBlock(t, ml, basics.Round(numOfIntervals*intervalForTest))
checkVoters(a, ao, 2)
a.Equal(basics.Round((numOfIntervals-1)*intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
@@ -166,25 +175,24 @@ func TestLimitVoterTracker(t *testing.T) {
defer ml.Close()
conf := config.GetDefaultLocal()
+ // Set MaxAcctLookback to 0 so that all blocks are committed, for easier processing by the voters tracker.
+ conf.MaxAcctLookback = 0
au, ao := newAcctUpdates(t, ml, conf)
defer au.close()
defer ao.close()
- _, totals, err := au.LatestTotals()
- require.NoError(t, err)
-
i := uint64(1)
// since the first state proof is expected to happen on stateproofInterval*2 we would start giving up on state proofs
- // after intervalForTest*(recoveryIntervalForTests+3)
+ // after intervalForTest*(recoveryIntervalForTests+3) are committed
// should not give up on any state proof
for ; i < intervalForTest*(recoveryIntervalForTests+2); i++ {
- block := randomBlock(basics.Round(i))
- block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao, totals)
+ addRandomBlock(t, ml)
}
+ commitAll(t, ml)
+
// the votersForRoundCache should contain recoveryIntervalForTests+2 elements:
// recoveryIntervalForTests - since this is the recovery interval
// + 1 - since votersForRoundCache would contain the votersForRound for the next state proof to come
@@ -194,38 +202,38 @@ func TestLimitVoterTracker(t *testing.T) {
// after adding the round intervalForTest*(recoveryIntervalForTests+3)+1 we expect the voter tracker to remove voters
for ; i < intervalForTest*(recoveryIntervalForTests+3)+1; i++ {
- block := randomBlock(basics.Round(i))
- block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao, totals)
+ addRandomBlock(t, ml)
}
+ commitAll(t, ml)
+
checkVoters(a, ao, recoveryIntervalForTests+2)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*2-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
// after adding the round intervalForTest*(recoveryIntervalForTests+3)+1 we expect the voter tracker to remove voters
for ; i < intervalForTest*(recoveryIntervalForTests+4)+1; i++ {
- block := randomBlock(basics.Round(i))
- block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao, totals)
+ addRandomBlock(t, ml)
}
+
+ commitAll(t, ml)
checkVoters(a, ao, recoveryIntervalForTests+2)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*3-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
// if the last round of the intervalForTest has not been added to the ledger the votersTracker would
// retain one more element
for ; i < intervalForTest*(recoveryIntervalForTests+5); i++ {
- block := randomBlock(basics.Round(i))
- block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao, totals)
+ addRandomBlock(t, ml)
}
+
+ commitAll(t, ml)
checkVoters(a, ao, recoveryIntervalForTests+3)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*3-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
for ; i < intervalForTest*(recoveryIntervalForTests+5)+1; i++ {
- block := randomBlock(basics.Round(i))
- block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao, totals)
+ addRandomBlock(t, ml)
}
+
+ commitAll(t, ml)
checkVoters(a, ao, recoveryIntervalForTests+2)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*4-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
}
@@ -257,17 +265,12 @@ func TestTopNAccountsThatHaveNoMssKeys(t *testing.T) {
defer au.close()
defer ao.close()
- _, totals, err := au.LatestTotals()
- require.NoError(t, err)
-
i := uint64(1)
for ; i < (intervalForTest)+1; i++ {
- block := randomBlock(basics.Round(i))
- block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao, totals)
+ addRandomBlock(t, ml)
}
- top, err := ao.voters.getVoters(basics.Round(intervalForTest - lookbackForTest))
+ top, err := ao.voters.VotersForStateProof(basics.Round(intervalForTest - lookbackForTest))
a.NoError(err)
for j := 0; j < len(top.Participants); j++ {
a.Equal(merklesignature.NoKeysCommitment, top.Participants[j].PK.Commitment)
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index 4756aa155..65621e610 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -504,11 +504,10 @@ func (c *Client) signAndBroadcastTransactionWithWallet(walletHandle, pw []byte,
//
// validRounds | lastValid | result (lastValid)
// -------------------------------------------------
-// 0 | 0 | firstValid + maxTxnLife
-// 0 | N | lastValid
-// M | 0 | first + validRounds - 1
-// M | M | error
-//
+// 0 | 0 | firstValid + maxTxnLife
+// 0 | N | lastValid
+// M | 0 | first + validRounds - 1
+// M | M | error
func (c *Client) ComputeValidityRounds(firstValid, lastValid, validRounds uint64) (first, last, latest uint64, err error) {
params, err := c.cachedSuggestedParams()
if err != nil {
@@ -1025,7 +1024,7 @@ func (c *Client) VerifyParticipationKey(timeout time.Duration, participationID s
func (c *Client) RemoveParticipationKey(participationID string) error {
algod, err := c.ensureAlgodClient()
if err != nil {
- return nil
+ return err
}
return algod.RemoveParticipationKeyByID(participationID)
@@ -1270,6 +1269,26 @@ func (c *Client) Dryrun(data []byte) (resp model.DryrunResponse, err error) {
return
}
+// SimulateTransactionsRaw simulates a transaction group by taking raw request bytes and returns relevant simulation results.
+func (c *Client) SimulateTransactionsRaw(encodedRequest []byte) (result v2.PreEncodedSimulateResponse, err error) {
+ algod, err := c.ensureAlgodClient()
+ if err != nil {
+ return
+ }
+ var resp []byte
+ resp, err = algod.RawSimulateRawTransaction(encodedRequest)
+ if err != nil {
+ return
+ }
+ err = protocol.DecodeReflect(resp, &result)
+ return
+}
+
+// SimulateTransactions simulates transactions and returns relevant simulation results.
+func (c *Client) SimulateTransactions(request v2.PreEncodedSimulateRequest) (result v2.PreEncodedSimulateResponse, err error) {
+ return c.SimulateTransactionsRaw(protocol.EncodeReflect(&request))
+}
+
// TransactionProof returns a Merkle proof for a transaction in a block.
func (c *Client) TransactionProof(txid string, round uint64, hashType crypto.HashType) (resp model.TransactionProofResponse, err error) {
algod, err := c.ensureAlgodClient()
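
For orientation, a minimal sketch of how a caller might drive the new simulate entry points. It assumes a configured libgoal.Client; the helper name simulateGroup is hypothetical, while the types and methods are the ones introduced above:

    import (
    	v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"

    	"github.com/algorand/go-algorand/libgoal"
    )

    // simulateGroup runs a pre-encoded simulate request against algod.
    // Encoding and decoding are handled inside the client: the request is
    // msgpack-encoded with protocol.EncodeReflect, and the response is
    // decoded into v2.PreEncodedSimulateResponse.
    func simulateGroup(c *libgoal.Client, req v2.PreEncodedSimulateRequest) (v2.PreEncodedSimulateResponse, error) {
    	return c.SimulateTransactions(req)
    }
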
diff --git a/libgoal/teal.go b/libgoal/teal.go
index 90295799c..26fcbaa63 100644
--- a/libgoal/teal.go
+++ b/libgoal/teal.go
@@ -18,13 +18,14 @@ package libgoal
import (
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/transactions/logic"
)
// Compile compiles the given program and returns the compiled program
-func (c *Client) Compile(program []byte) (compiledProgram []byte, compiledProgramHash crypto.Digest, err error) {
+func (c *Client) Compile(program []byte, useSourceMap bool) (compiledProgram []byte, compiledProgramHash crypto.Digest, sourcemap *logic.SourceMap, err error) {
algod, err2 := c.ensureAlgodClient()
if err2 != nil {
- return nil, crypto.Digest{}, err2
+ return nil, crypto.Digest{}, nil, err2
}
- return algod.Compile(program)
+ return algod.Compile(program, useSourceMap)
}
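
A sketch of the updated call shape; the TEAL source and program version here are illustrative, and the nil-when-unrequested behavior of the source map is an assumption:

    import (
    	"fmt"

    	"github.com/algorand/go-algorand/libgoal"
    )

    func compileWithSourceMap(c *libgoal.Client) error {
    	// useSourceMap=true asks algod to return a *logic.SourceMap alongside
    	// the compiled bytes; with false, srcMap is presumably nil.
    	prog, hash, srcMap, err := c.Compile([]byte("#pragma version 8\nint 1"), true)
    	if err != nil {
    		return err
    	}
    	fmt.Printf("compiled %d bytes, hash %s, have source map: %v\n", len(prog), hash, srcMap != nil)
    	return nil
    }
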
diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go
index b50ac7eab..4a8280591 100644
--- a/logging/telemetryspec/event.go
+++ b/logging/telemetryspec/event.go
@@ -196,7 +196,7 @@ type PeerEventDetails struct {
InstanceName string
// Endpoint is the dialed-to address for an outgoing connection. Not used for incoming connections.
Endpoint string `json:",omitempty"`
- // MessageDelay is the avarage relative message delay. Not being used for incoming connection.
+ // MessageDelay is the average relative message delay. Not being used for incoming connection.
MessageDelay int64 `json:",omitempty"`
}
@@ -291,7 +291,7 @@ type PeerConnectionDetails struct {
ConnectionDuration uint
// Endpoint is the dialed-to address for an outgoing connection. Not used for incoming connections.
Endpoint string `json:",omitempty"`
- // MessageDelay is the avarage relative message delay. Not being used for incoming connection.
+ // MessageDelay is the average relative message delay. Not being used for incoming connection.
MessageDelay int64 `json:",omitempty"`
// DuplicateFilterCount is the number of times this peer has sent us a message hash to filter that it had already sent before.
DuplicateFilterCount uint64
@@ -307,6 +307,8 @@ const CatchpointGenerationEvent Event = "CatchpointGeneration"
// CatchpointGenerationEventDetails is generated when a catchpoint file is created, and provides
// some statistics about that event.
type CatchpointGenerationEventDetails struct {
+ // AccountsRound is the round at which the account snapshot was taken.
+ AccountsRound uint64
// WritingDuration is the total elapsed time it took to write the catchpoint file.
WritingDuration uint64
// CPUTime is the single-core time spent waiting for the catchpoint file to be written.
@@ -322,8 +324,10 @@ type CatchpointGenerationEventDetails struct {
KVsCount uint64
// FileSize is the size of the catchpoint file, in bytes.
FileSize uint64
- // CatchpointLabel is the catchpoint label for which the catchpoint file was generated.
- CatchpointLabel string
+ // MerkleTrieRootHash is the merkle trie root hash that represents all accounts and KVs.
+ MerkleTrieRootHash string
+ // SPVerificationCtxsHash is the hash of all the state proof verification contexts in the catchpoint
+ SPVerificationCtxsHash string
}
// CatchpointRootUpdateEvent event
diff --git a/logging/telemetryspec/metric.go b/logging/telemetryspec/metric.go
index da5f4a753..bdd504e2f 100644
--- a/logging/telemetryspec/metric.go
+++ b/logging/telemetryspec/metric.go
@@ -80,7 +80,7 @@ type StateProofStats struct {
// AssembleBlockTimeout represents AssembleBlock exiting due to timeout
const AssembleBlockTimeout = "timeout"
-// AssembleBlockTimeout represents AssembleBlock giving up after a timeout and returning an empty block
+// AssembleBlockTimeoutEmpty represents AssembleBlock giving up after a timeout and returning an empty block
const AssembleBlockTimeoutEmpty = "timeout-empty"
// AssembleBlockFull represents AssembleBlock exiting due to block being full
diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go
index a6b7be3f7..4bf395514 100644
--- a/netdeploy/remote/deployedNetwork.go
+++ b/netdeploy/remote/deployedNetwork.go
@@ -757,7 +757,7 @@ func createSignedTx(src basics.Address, round basics.Round, params config.Consen
return []transactions.SignedTxn{}, err
}
approval := ops.Program
- ops, err = logic.AssembleString("#pragma version 2 int 1")
+ ops, err = logic.AssembleString("#pragma version 2\nint 1")
if err != nil {
panic(err)
}
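
The change above is not cosmetic: #pragma directives are line-scoped in TEAL, so the premise of this fix is that the single-line form fails to assemble while the two-line form succeeds. A minimal sketch under that assumption:

    import "github.com/algorand/go-algorand/data/transactions/logic"

    func pragmaNeedsItsOwnLine() {
    	// Two-line form: version pragma, then the program body. Assembles.
    	if _, err := logic.AssembleString("#pragma version 2\nint 1"); err != nil {
    		panic(err)
    	}
    	// Single-line form: the whole line is parsed as one pragma directive,
    	// so the trailing "int 1" is expected to be rejected.
    	if _, err := logic.AssembleString("#pragma version 2 int 1"); err == nil {
    		panic("expected an assembly error")
    	}
    }
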
diff --git a/netdeploy/remote/nodecfg/nodeConfigurator.go b/netdeploy/remote/nodecfg/nodeConfigurator.go
index 4621373b2..c398e8c44 100644
--- a/netdeploy/remote/nodecfg/nodeConfigurator.go
+++ b/netdeploy/remote/nodecfg/nodeConfigurator.go
@@ -39,6 +39,7 @@ type nodeConfigurator struct {
genesisData bookkeeping.Genesis
bootstrappedBlockFile string
bootstrappedTrackerFile string
+ bootstrappedTrackerDir string
relayEndpoints []srvEntry
metricsEndpoints []srvEntry
}
@@ -78,6 +79,12 @@ func (nc *nodeConfigurator) apply(rootConfigDir, rootNodeDir string) (err error)
nc.bootstrappedTrackerFile = trackerFile
}
+ trackerDir := filepath.Join(rootConfigDir, "genesisdata", "bootstrapped")
+ trackerDirExists := util.FileExists(trackerDir)
+ if trackerDirExists {
+ nc.bootstrappedTrackerDir = trackerDir
+ }
+
nc.genesisFile = filepath.Join(rootConfigDir, "genesisdata", config.GenesisJSONFile)
nc.genesisData, err = bookkeeping.LoadGenesisFromFile(nc.genesisFile)
nodeDirs, err := nc.prepareNodeDirs(nc.config.Nodes, rootConfigDir, rootNodeDir)
@@ -150,20 +157,30 @@ func (nc *nodeConfigurator) prepareNodeDirs(configs []remote.NodeConfig, rootCon
}
// Copy the bootstrapped files into current ledger folder
- if nc.bootstrappedBlockFile != "" && nc.bootstrappedTrackerFile != "" {
+ if nc.bootstrappedBlockFile != "" &&
+ (nc.bootstrappedTrackerFile != "" || nc.bootstrappedTrackerDir != "") {
fmt.Fprintf(os.Stdout, "... copying block database file to ledger folder ...\n")
dest := filepath.Join(nodeDest, genesisDir, fmt.Sprintf("%s.block.sqlite", config.LedgerFilenamePrefix))
_, err = util.CopyFile(nc.bootstrappedBlockFile, dest)
if err != nil {
return nil, fmt.Errorf("failed to copy database file %s from %s to %s : %w", "bootstrapped.block.sqlite", filepath.Dir(nc.bootstrappedBlockFile), dest, err)
}
- fmt.Fprintf(os.Stdout, "... copying tracker database file to ledger folder ...\n")
- dest = filepath.Join(nodeDest, genesisDir, fmt.Sprintf("%s.tracker.sqlite", config.LedgerFilenamePrefix))
- _, err = util.CopyFile(nc.bootstrappedTrackerFile, dest)
- if err != nil {
- return nil, fmt.Errorf("failed to copy database file %s from %s to %s : %w", "bootstrapped.tracker.sqlite", filepath.Dir(nc.bootstrappedBlockFile), dest, err)
+ if nc.bootstrappedTrackerFile != "" {
+ fmt.Fprintf(os.Stdout, "... copying tracker database file to ledger folder ...\n")
+ dest = filepath.Join(nodeDest, genesisDir, fmt.Sprintf("%s.tracker.sqlite", config.LedgerFilenamePrefix))
+ _, err = util.CopyFile(nc.bootstrappedTrackerFile, dest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to copy database file %s from %s to %s : %w", filepath.Base(nc.bootstrappedBlockFile), filepath.Dir(nc.bootstrappedBlockFile), dest, err)
+ }
+ }
+ if nc.bootstrappedTrackerDir != "" {
+ fmt.Fprintf(os.Stdout, "... copying tracker database directory to ledger folder ...\n")
+ dest = filepath.Join(nodeDest, genesisDir, config.LedgerFilenamePrefix)
+ err = util.CopyFolder(nc.bootstrappedTrackerDir, dest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to copy database directory from %s to %s : %w", nc.bootstrappedTrackerDir, dest, err)
+ }
}
-
}
nodeDirs = append(nodeDirs, nodeDir{
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 0280952d1..919655352 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -101,8 +101,12 @@ const slowWritingPeerMonitorInterval = 5 * time.Second
// to the log file. Note that the log file itself would also json-encode these before placing them in the log file.
const unprintableCharacterGlyph = "▯"
-// match config.PublicAddress to this string to automatically set PublicAddress from Address()
-const autoconfigPublicAddress = "auto"
+// testingPublicAddress is used in identity exchange tests for a predictable
+// PublicAddress (which will match HTTP Listener's Address) in tests only.
+const testingPublicAddress = "testing"
+
+// Maximum number of bytes to read from a header when trying to establish a websocket connection.
+const wsMaxHeaderBytes = 4096
var networkIncomingConnections = metrics.MakeGauge(metrics.NetworkIncomingConnections)
var networkOutgoingConnections = metrics.MakeGauge(metrics.NetworkOutgoingConnections)
@@ -395,6 +399,9 @@ type WebsocketNetwork struct {
// outgoingMessagesBufferSize is the size used for outgoing messages.
outgoingMessagesBufferSize int
+ // wsMaxHeaderBytes is the maximum accepted size of the header prior to upgrading to a websocket connection.
+ wsMaxHeaderBytes int64
+
// slowWritingPeerMonitorInterval defines the interval between two consecutive tests for slow peer writing
slowWritingPeerMonitorInterval time.Duration
@@ -757,6 +764,8 @@ func (wn *WebsocketNetwork) setup() {
config.Consensus[protocol.ConsensusCurrentVersion].DownCommitteeSize),
)
+ wn.wsMaxHeaderBytes = wsMaxHeaderBytes
+
wn.identityTracker = NewIdentityTracker()
wn.broadcastQueueHighPrio = make(chan broadcastRequest, wn.outgoingMessagesBufferSize)
@@ -842,8 +851,8 @@ func (wn *WebsocketNetwork) Start() {
wn.scheme = "http"
}
- // if PublicAddress set to automatic, pull the name from Address()
- if wn.config.PublicAddress == autoconfigPublicAddress {
+ // if PublicAddress is set to "testing", pull the name from Address()
+ if wn.config.PublicAddress == testingPublicAddress {
addr, ok := wn.Address()
if ok {
url, err := url.Parse(addr)
@@ -2192,9 +2201,11 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
EnableCompression: false,
NetDialContext: wn.dialer.DialContext,
NetDial: wn.dialer.Dial,
+ MaxHeaderSize: wn.wsMaxHeaderBytes,
}
conn, response, err := websocketDialer.DialContext(wn.ctx, gossipAddr, requestHeader)
if err != nil {
if err == websocket.ErrBadHandshake {
// reading here from ioutil is safe only because it came from DialContext above, which already finished reading all the data from the network
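
For reference, a small sketch of the knob being wired here, using only what this diff shows: the github.com/algorand/websocket dialer accepts a MaxHeaderSize, and a value of 0 disables the cap (see TestMaxHeaderSize below):

    import "github.com/algorand/websocket"

    // newCappedDialer builds a dialer that aborts the handshake when the
    // server's HTTP response headers exceed maxHeaderBytes (0 means no limit).
    func newCappedDialer(maxHeaderBytes int64) websocket.Dialer {
    	return websocket.Dialer{
    		EnableCompression: false,
    		MaxHeaderSize:     maxHeaderBytes,
    	}
    }
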
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index c08042c46..3229e2ae2 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -27,6 +27,7 @@ import (
"math/rand"
"net"
"net/http"
+ "net/http/httptest"
"net/url"
"os"
"runtime"
@@ -41,6 +42,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-deadlock"
+ "github.com/algorand/websocket"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -54,6 +56,8 @@ import (
const sendBufferLength = 1000
+const genesisID = "go-test-network-genesis"
+
func init() {
// this allows test code to use out-of-protocol message tags and have them go through
allowCustomTags = true
@@ -127,7 +131,7 @@ func makeTestWebsocketNodeWithConfig(t testing.TB, conf config.Local, opts ...te
log: log,
config: conf,
phonebook: MakePhonebook(1, 1*time.Millisecond),
- GenesisID: "go-test-network-genesis",
+ GenesisID: genesisID,
NetworkID: config.Devtestnet,
}
// apply options to newly-created WebsocketNetwork, if provided
@@ -990,7 +994,7 @@ func makeTestFilterWebsocketNode(t *testing.T, nodename string) *WebsocketNetwor
log: logging.TestingLog(t).With("node", nodename),
config: dc,
phonebook: MakePhonebook(1, 1*time.Millisecond),
- GenesisID: "go-test-network-genesis",
+ GenesisID: genesisID,
NetworkID: config.Devtestnet,
}
require.True(t, wn.config.EnableIncomingMessageFilter)
@@ -1131,25 +1135,25 @@ func TestGetPeers(t *testing.T) {
assert.Equal(t, expectAddrs, peerAddrs)
}
-// confirms that if the config PublicAddress is set to "auto",
+// confirms that if the config PublicAddress is set to "testing",
// PublicAddress is loaded when possible with the value of Address()
-func TestAutoPublicAddress(t *testing.T) {
+func TestTestingPublicAddress(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
netA := makeTestWebsocketNode(t)
- netA.config.PublicAddress = "auto"
+ netA.config.PublicAddress = "testing"
netA.config.GossipFanout = 1
netA.Start()
time.Sleep(100 * time.Millisecond)
- // check that "auto" has been overloaded
+ // check that "testing" has been overridden
addr, ok := netA.Address()
addr = hostAndPort(addr)
require.True(t, ok)
- require.NotEqual(t, "auto", netA.PublicAddress())
+ require.NotEqual(t, "testing", netA.PublicAddress())
require.Equal(t, addr, netA.PublicAddress())
}
@@ -1231,12 +1235,12 @@ func TestPeeringWithIdentityChallenge(t *testing.T) {
netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
netA.identityTracker = newMockIdentityTracker(netA.identityTracker)
- netA.config.PublicAddress = "auto"
+ netA.config.PublicAddress = "testing"
netA.config.GossipFanout = 1
netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
netB.identityTracker = newMockIdentityTracker(netB.identityTracker)
- netB.config.PublicAddress = "auto"
+ netB.config.PublicAddress = "testing"
netB.config.GossipFanout = 1
netA.Start()
@@ -1380,12 +1384,12 @@ func TestPeeringSenderIdentityChallengeOnly(t *testing.T) {
netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
netA.identityTracker = newMockIdentityTracker(netA.identityTracker)
- netA.config.PublicAddress = "auto"
+ netA.config.PublicAddress = "testing"
netA.config.GossipFanout = 1
netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
netB.identityTracker = newMockIdentityTracker(netB.identityTracker)
- //netB.config.PublicAddress = "auto"
+ //netB.config.PublicAddress = "testing"
netB.config.GossipFanout = 1
netA.Start()
@@ -1445,12 +1449,12 @@ func TestPeeringReceiverIdentityChallengeOnly(t *testing.T) {
netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
netA.identityTracker = newMockIdentityTracker(netA.identityTracker)
- //netA.config.PublicAddress = "auto"
+ //netA.config.PublicAddress = "testing"
netA.config.GossipFanout = 1
netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
netB.identityTracker = newMockIdentityTracker(netB.identityTracker)
- netB.config.PublicAddress = "auto"
+ netB.config.PublicAddress = "testing"
netB.config.GossipFanout = 1
netA.Start()
@@ -1512,7 +1516,7 @@ func TestPeeringIncorrectDeduplicationName(t *testing.T) {
netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
netA.identityTracker = newMockIdentityTracker(netA.identityTracker)
- netA.config.PublicAddress = "auto"
+ netA.config.PublicAddress = "testing"
netA.config.GossipFanout = 1
netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
@@ -1701,7 +1705,7 @@ func TestPeeringWithBadIdentityChallenge(t *testing.T) {
t.Logf("Running Peering with Identity Challenge Test: %s", tc.name)
netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
netA.identityTracker = newMockIdentityTracker(netA.identityTracker)
- netA.config.PublicAddress = "auto"
+ netA.config.PublicAddress = "testing"
netA.config.GossipFanout = 1
scheme := newMockIdentityScheme(t)
@@ -1710,7 +1714,7 @@ func TestPeeringWithBadIdentityChallenge(t *testing.T) {
netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
netB.identityTracker = newMockIdentityTracker(netB.identityTracker)
- netB.config.PublicAddress = "auto"
+ netB.config.PublicAddress = "testing"
netB.config.GossipFanout = 1
netA.Start()
@@ -1844,12 +1848,12 @@ func TestPeeringWithBadIdentityChallengeResponse(t *testing.T) {
t.Logf("Running Peering with Identity Challenge Response Test: %s", tc.name)
netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
netA.identityTracker = newMockIdentityTracker(netA.identityTracker)
- netA.config.PublicAddress = "auto"
+ netA.config.PublicAddress = "testing"
netA.config.GossipFanout = 1
netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
netB.identityTracker = newMockIdentityTracker(netB.identityTracker)
- netB.config.PublicAddress = "auto"
+ netB.config.PublicAddress = "testing"
netB.config.GossipFanout = 1
scheme := newMockIdentityScheme(t)
@@ -1997,7 +2001,7 @@ func TestPeeringWithBadIdentityVerification(t *testing.T) {
t.Logf("Running Peering with Identity Verification Test: %s", tc.name)
netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
netA.identityTracker = newMockIdentityTracker(netA.identityTracker)
- netA.config.PublicAddress = "auto"
+ netA.config.PublicAddress = "testing"
netA.config.GossipFanout = 1
scheme := newMockIdentityScheme(t)
@@ -2006,7 +2010,7 @@ func TestPeeringWithBadIdentityVerification(t *testing.T) {
netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
netB.identityTracker = newMockIdentityTracker(netB.identityTracker)
- netB.config.PublicAddress = "auto"
+ netB.config.PublicAddress = "testing"
netB.config.GossipFanout = 1
// if the key is occupied, make the tracker fail to insert the peer
if tc.occupied {
@@ -2462,7 +2466,7 @@ func TestSlowPeerDisconnection(t *testing.T) {
log: log,
config: defaultConfig,
phonebook: MakePhonebook(1, 1*time.Millisecond),
- GenesisID: "go-test-network-genesis",
+ GenesisID: genesisID,
NetworkID: config.Devtestnet,
slowWritingPeerMonitorInterval: time.Millisecond * 50,
}
@@ -2537,7 +2541,7 @@ func TestForceMessageRelaying(t *testing.T) {
log: log,
config: defaultConfig,
phonebook: MakePhonebook(1, 1*time.Millisecond),
- GenesisID: "go-test-network-genesis",
+ GenesisID: genesisID,
NetworkID: config.Devtestnet,
}
wn.setup()
@@ -2631,7 +2635,7 @@ func TestCheckProtocolVersionMatch(t *testing.T) {
log: log,
config: defaultConfig,
phonebook: MakePhonebook(1, 1*time.Millisecond),
- GenesisID: "go-test-network-genesis",
+ GenesisID: genesisID,
NetworkID: config.Devtestnet,
}
wn.setup()
@@ -3757,3 +3761,187 @@ func TestWebsocketNetworkTelemetryTCP(t *testing.T) {
t.Log("closed detailsA", string(pcdA))
t.Log("closed detailsB", string(pcdB))
}
+
+type mockServer struct {
+ *httptest.Server
+ URL string
+ t *testing.T
+
+ waitForClientClose bool
+}
+
+type mockHandler struct {
+ *testing.T
+ s *mockServer
+}
+
+var mockUpgrader = websocket.Upgrader{
+ ReadBufferSize: 1024,
+ WriteBufferSize: 1024,
+ EnableCompression: true,
+ Error: func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+ http.Error(w, reason.Error(), status)
+ },
+}
+
+func buildWsResponseHeader() http.Header {
+ h := http.Header{}
+ h.Add(ProtocolVersionHeader, ProtocolVersion)
+ h.Add(GenesisHeader, genesisID)
+ h.Add(NodeRandomHeader, "randomHeader")
+ return h
+}
+
+func (t mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ // Set the required headers to successfully establish a connection
+ ws, err := mockUpgrader.Upgrade(w, r, buildWsResponseHeader())
+ if err != nil {
+ t.Logf("Upgrade: %v", err)
+ return
+ }
+ defer ws.Close()
+ // Send a message of interest immediately after the connection is established
+ wr, err := ws.NextWriter(websocket.BinaryMessage)
+ if err != nil {
+ t.Logf("NextWriter: %v", err)
+ return
+ }
+
+ bytes := MarshallMessageOfInterest([]protocol.Tag{protocol.AgreementVoteTag})
+ msgBytes := append([]byte(protocol.MsgOfInterestTag), bytes...)
+ _, err = wr.Write(msgBytes)
+ if err != nil {
+ t.Logf("Error writing MessageOfInterest: %v", err)
+ return
+ }
+ wr.Close()
+
+ for {
+ // keep reading until the client closes the connection
+ _, _, err := ws.NextReader()
+ if err != nil {
+ if _, ok := err.(*websocket.CloseError); ok && t.s.waitForClientClose {
+ t.Log("got client close")
+ return
+ }
+ return
+ }
+ }
+}
+
+func makeWsProto(s string) string {
+ return "ws" + strings.TrimPrefix(s, "http")
+}
+
+func newServer(t *testing.T) *mockServer {
+ var s mockServer
+ s.Server = httptest.NewServer(mockHandler{t, &s})
+ s.URL = makeWsProto(s.Server.URL)
+ return &s
+}
+
+func TestMaxHeaderSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
+ netA.config.GossipFanout = 1
+
+ netB := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netB"})
+ netB.config.GossipFanout = 1
+
+ netA.Start()
+ defer netA.Stop()
+ netB.Start()
+ defer netB.Stop()
+
+ addrB, ok := netB.Address()
+ require.True(t, ok)
+ gossipB, err := netB.addrToGossipAddr(addrB)
+ require.NoError(t, err)
+
+ // First make sure that the regular connection with default max header size works
+ netA.wsMaxHeaderBytes = wsMaxHeaderBytes
+ netA.wg.Add(1)
+ netA.tryConnect(addrB, gossipB)
+ time.Sleep(250 * time.Millisecond)
+ assert.Equal(t, 1, len(netA.peers))
+
+ netA.removePeer(netA.peers[0], disconnectReasonNone)
+ assert.Zero(t, len(netA.peers))
+
+ // Now try to connect with a max header size that is too small
+ logBuffer := bytes.NewBuffer(nil)
+ netA.log.SetOutput(logBuffer)
+
+ netA.wsMaxHeaderBytes = 128
+ netA.wg.Add(1)
+ netA.tryConnect(addrB, gossipB)
+ lg := logBuffer.String()
+ logBuffer.Reset()
+ time.Sleep(250 * time.Millisecond)
+ assert.Contains(t, lg, fmt.Sprintf("ws connect(%s) fail:", gossipB))
+ assert.Zero(t, len(netA.peers))
+
+ // Test that setting 0 disables the max header size check
+ netA.wsMaxHeaderBytes = 0
+ netA.wg.Add(1)
+ netA.tryConnect(addrB, gossipB)
+ time.Sleep(250 * time.Millisecond)
+ assert.Equal(t, 1, len(netA.peers))
+}
+
+func TestTryConnectEarlyWrite(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ netA := makeTestWebsocketNode(t, testWebsocketLogNameOption{"netA"})
+ netA.config.GossipFanout = 1
+
+ s := newServer(t)
+ s.waitForClientClose = true
+ defer s.Close()
+
+ netA.Start()
+ defer netA.Stop()
+
+ dialer := websocket.Dialer{}
+ mconn, resp, _ := dialer.Dial(s.URL, nil)
+ expectedHeader := buildWsResponseHeader()
+ for k, v := range expectedHeader {
+ assert.Equal(t, v[0], resp.Header.Get(k))
+ }
+
+ // Fixed overhead of the full status line "HTTP/1.1 101 Switching Protocols" (32) + 4 bytes for two instances of CRLF
+ // one after the status line and one to separate headers from the body
+ minValidHeaderSize := 36
+ for k, v := range resp.Header {
+ minValidHeaderSize += len(k) + len(v[0]) + 4 // + 4 is for the ": " and CRLF
+ }
+ mconn.Close()
+
+ // Setting the max header size to 1 byte less than the minimum header size should fail
+ netA.wsMaxHeaderBytes = int64(minValidHeaderSize) - 1
+ netA.wg.Add(1)
+ netA.tryConnect(s.URL, s.URL)
+ time.Sleep(250 * time.Millisecond)
+ assert.Len(t, netA.peers, 0)
+
+ // Now set the max header size to the minimum header size and it should succeed
+ netA.wsMaxHeaderBytes = int64(minValidHeaderSize)
+ netA.wg.Add(1)
+ netA.tryConnect(s.URL, s.URL)
+ p := netA.peers[0]
+ var messageCount uint64
+ for x := 0; x < 1000; x++ {
+ messageCount = atomic.LoadUint64(&p.miMessageCount)
+ if messageCount == 1 {
+ break
+ }
+ time.Sleep(2 * time.Millisecond)
+ }
+
+ // Confirm that we successfully received a message of interest
+ assert.Len(t, netA.peers, 1)
+ fmt.Printf("MI Message Count: %v\n", netA.peers[0].miMessageCount)
+ assert.Equal(t, uint64(1), netA.peers[0].miMessageCount)
+}
diff --git a/node/error.go b/node/error.go
index dbe036b60..d177f0c87 100644
--- a/node/error.go
+++ b/node/error.go
@@ -62,24 +62,3 @@ func (e *CatchpointUnableToStartError) Error() string {
e.catchpointRequested,
e.catchpointRunning)
}
-
-// CatchpointSyncRoundFailure indicates that the requested catchpoint is beyond the currently set sync round
-type CatchpointSyncRoundFailure struct {
- catchpoint string
- syncRound uint64
-}
-
-// MakeCatchpointSyncRoundFailure creates the error type
-func MakeCatchpointSyncRoundFailure(catchpoint string, syncRound uint64) *CatchpointSyncRoundFailure {
- return &CatchpointSyncRoundFailure{
- catchpoint: catchpoint,
- syncRound: syncRound,
- }
-}
-
-// Error satisfies the builtin `error` interface
-func (e *CatchpointSyncRoundFailure) Error() string {
- return fmt.Sprintf(
- "unable to start catchpoint catchup for '%s' - resulting round is beyond current sync round '%v'",
- e.catchpoint, e.syncRound)
-}
diff --git a/node/follower_node.go b/node/follower_node.go
index ac8cad28e..ae770c711 100644
--- a/node/follower_node.go
+++ b/node/follower_node.go
@@ -240,7 +240,7 @@ func (node *AlgorandFollowerNode) BroadcastInternalSignedTxGroup(_ []transaction
// Simulate speculatively runs a transaction group against the current
// blockchain state and returns the effects and/or errors that would result.
-func (node *AlgorandFollowerNode) Simulate(_ []transactions.SignedTxn) (result simulation.Result, err error) {
+func (node *AlgorandFollowerNode) Simulate(_ simulation.Request) (result simulation.Result, err error) {
err = fmt.Errorf("cannot simulate in data mode")
return
}
@@ -342,14 +342,7 @@ func (node *AlgorandFollowerNode) StartCatchup(catchpoint string) error {
}
return MakeCatchpointUnableToStartError(stats.CatchpointLabel, catchpoint)
}
- cpRound, _, err := ledgercore.ParseCatchpointLabel(catchpoint)
- if err != nil {
- return err
- }
- sRound := node.GetSyncRound()
- if sRound > 0 && uint64(cpRound) > sRound {
- return MakeCatchpointSyncRoundFailure(catchpoint, sRound)
- }
+ var err error
accessor := ledger.MakeCatchpointCatchupAccessor(node.ledger.Ledger, node.log)
node.catchpointCatchupService, err = catchup.MakeNewCatchpointCatchupService(catchpoint, node, node.log, node.net, accessor, node.config)
if err != nil {
@@ -389,7 +382,7 @@ func (node *AlgorandFollowerNode) SetCatchpointCatchupMode(catchpointCatchupMode
outCtxCh = ctxCh
go func() {
node.mu.Lock()
- // check that the node wasn't canceled. If it have been canceled, it means that the node.Stop() was called, in which case
+ // check that the node wasn't canceled. If it has been canceled, it means that the node.Stop() was called, in which case
// we should close the channel.
if node.ctx.Err() == context.Canceled {
close(ctxCh)
@@ -414,7 +407,15 @@ func (node *AlgorandFollowerNode) SetCatchpointCatchupMode(catchpointCatchupMode
prevNodeCancelFunc()
return
}
+
+ // Catchup finished, resume.
defer node.mu.Unlock()
+
+ // update sync round before starting services
+ if err := node.SetSyncRound(uint64(node.ledger.LastRound())); err != nil {
+ node.log.Warnf("unable to set sync round while resuming fast catchup: %v", err)
+ }
+
// start
node.catchupService.Start()
node.blockService.Start()
@@ -447,3 +448,15 @@ func (node *AlgorandFollowerNode) GetSyncRound() uint64 {
func (node *AlgorandFollowerNode) UnsetSyncRound() {
node.catchupService.UnsetDisableSyncRound()
}
+
+// SetBlockTimeStampOffset sets a timestamp offset in the block header.
+// This is only available in dev mode.
+func (node *AlgorandFollowerNode) SetBlockTimeStampOffset(offset int64) error {
+ return fmt.Errorf("cannot set block timestamp offset in follower mode")
+}
+
+// GetBlockTimeStampOffset gets a timestamp offset.
+// This is only available in dev mode.
+func (node *AlgorandFollowerNode) GetBlockTimeStampOffset() (*int64, error) {
+ return nil, fmt.Errorf("cannot get block timestamp offset in follower mode")
+}
diff --git a/node/follower_node_test.go b/node/follower_node_test.go
index 33a004acd..17dbd52ca 100644
--- a/node/follower_node_test.go
+++ b/node/follower_node_test.go
@@ -17,10 +17,12 @@
package node
import (
+ "context"
"testing"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/agreement"
@@ -29,6 +31,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/simulation"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -112,7 +115,7 @@ func TestErrors(t *testing.T) {
node := setupFollowNode(t)
require.Error(t, node.BroadcastSignedTxGroup([]transactions.SignedTxn{}))
require.Error(t, node.BroadcastInternalSignedTxGroup([]transactions.SignedTxn{}))
- _, err := node.Simulate([]transactions.SignedTxn{})
+ _, err := node.Simulate(simulation.Request{})
require.Error(t, err)
_, err = node.GetParticipationKey(account.ParticipationID{})
require.Error(t, err)
@@ -148,67 +151,21 @@ func TestDevModeWarning(t *testing.T) {
require.Contains(t, foundEntry.Message, "Follower running on a devMode network. Must submit txns to a different node.")
}
-// TestSyncRoundWithRemake extends TestSyncRound to simulate starting and stopping the network
-func TestSyncRoundWithRemake(t *testing.T) {
+func TestFastCatchupResume(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
+ node := setupFollowNode(t)
+ node.ctx = context.Background()
- maxAcctLookback := uint64(100)
-
- followNode, tempDir := remakeableFollowNode(t, "", maxAcctLookback)
- addBlock := func(round basics.Round) {
- b := bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- GenesisHash: followNode.ledger.GenesisHash(),
- Round: round,
- RewardsState: bookkeeping.RewardsState{
- RewardsRate: 0,
- RewardsPool: poolAddr,
- FeeSink: sinkAddr,
- },
- },
- }
- b.CurrentProtocol = protocol.ConsensusCurrentVersion
- err := followNode.Ledger().AddBlock(b, agreement.Certificate{})
- require.NoError(t, err)
-
- status, err := followNode.Status()
- require.NoError(t, err)
- require.Equal(t, round, status.LastRound)
- }
-
- // Part I. redo TestSyncRound
- // main differences are:
- // * cfg.DisableNetworking = true
- // * cfg.MaxAcctLookback = 100 (instead of 4)
-
- addBlock(basics.Round(1))
-
- dbRound := uint64(followNode.Ledger().LatestTrackerCommitted())
- // Sync Round should be initialized to the ledger's dbRound + 1
- require.Equal(t, dbRound+1, followNode.GetSyncRound())
- // Set a new sync round
- require.NoError(t, followNode.SetSyncRound(dbRound+11))
- // Ensure it is persisted
- require.Equal(t, dbRound+11, followNode.GetSyncRound())
- // Unset the sync round and make sure get returns 0
- followNode.UnsetSyncRound()
- require.Equal(t, uint64(0), followNode.GetSyncRound())
-
- // Part II. fast forward and then remake the node
-
- newRound := basics.Round(2 * maxAcctLookback)
- for i := basics.Round(2); i <= newRound; i++ {
- addBlock(i)
- }
+ // Initialize sync round to a future round.
+ syncRound := uint64(10000)
+ node.SetSyncRound(syncRound)
+ require.Equal(t, syncRound, node.GetSyncRound())
- followNode, _ = remakeableFollowNode(t, tempDir, maxAcctLookback)
- status, err := followNode.Status()
- require.NoError(t, err)
- require.Equal(t, newRound, status.LastRound)
+ // Force catchpoint catchup mode to end, this should set the sync round to the current ledger round (0).
+ out := node.SetCatchpointCatchupMode(false)
+ <-out
- // syncRound should be at
- // newRound - maxAcctLookback + 1 = maxAcctLookback + 1
- syncRound := followNode.GetSyncRound()
- require.Equal(t, uint64(maxAcctLookback+1), syncRound)
+ // Verify the sync was reset.
+ assert.Equal(t, uint64(0), node.GetSyncRound())
}
diff --git a/node/node.go b/node/node.go
index abff864d4..3ff616da6 100644
--- a/node/node.go
+++ b/node/node.go
@@ -21,6 +21,7 @@ import (
"context"
"errors"
"fmt"
+ "math"
"os"
"path/filepath"
"strings"
@@ -129,10 +130,11 @@ type AlgorandFullNode struct {
indexer *indexer.Indexer
- rootDir string
- genesisID string
- genesisHash crypto.Digest
- devMode bool // is this node operates in a developer mode ? ( benign agreement, broadcasting transaction generates a new block )
+ rootDir string
+ genesisID string
+ genesisHash crypto.Digest
+ devMode bool // is this node operating in developer mode? (benign agreement, broadcasting a transaction generates a new block)
+ timestampOffset *int64
log logging.Logger
@@ -326,17 +328,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
node.tracer = messagetracer.NewTracer(log).Init(cfg)
gossip.SetTrace(agreementParameters.Network, node.tracer)
- // Delete the deprecated database file if it exists. This can be removed in future updates since this file should not exist by then.
- oldCompactCertPath := filepath.Join(genesisDir, "compactcert.sqlite")
- os.Remove(oldCompactCertPath)
-
- stateProofPathname := filepath.Join(genesisDir, config.StateProofFileName)
- stateProofAccess, err := db.MakeAccessor(stateProofPathname, false, false)
- if err != nil {
- log.Errorf("Cannot load state proof data: %v", err)
- return nil, err
- }
- node.stateProofWorker = stateproof.NewWorker(stateProofAccess, node.log, node.accountManager, node.ledger.Ledger, node.net, node)
+ node.stateProofWorker = stateproof.NewWorker(genesisDir, node.log, node.accountManager, node.ledger.Ledger, node.net, node)
return node, err
}
@@ -428,8 +420,6 @@ func (node *AlgorandFullNode) Stop() {
defer func() {
node.mu.Unlock()
node.waitMonitoringRoutines()
- node.stateProofWorker.Shutdown()
- node.stateProofWorker = nil
}()
node.net.ClearHandlers()
@@ -439,6 +429,7 @@ func (node *AlgorandFullNode) Stop() {
if node.catchpointCatchupService != nil {
node.catchpointCatchupService.Stop()
} else {
+ node.stateProofWorker.Stop()
node.txHandler.Stop()
node.agreementService.Shutdown()
node.catchupService.Stop()
@@ -480,6 +471,24 @@ func (node *AlgorandFullNode) writeDevmodeBlock() (err error) {
return
}
+ // Make a new validated block.
+ prevRound := vb.Block().Round() - 1
+ prev, err := node.ledger.BlockHdr(prevRound)
+ if err != nil {
+ return err
+ }
+
+ blk := vb.Block()
+
+ // Set block timestamp based on offset, if set.
+ // Make sure block timestamp is not greater than MaxInt64.
+ if node.timestampOffset != nil && *node.timestampOffset < math.MaxInt64-prev.TimeStamp {
+ blk.TimeStamp = prev.TimeStamp + *node.timestampOffset
+ }
+ blk.BlockHeader.Seed = committee.Seed(prev.Hash())
+ vb2 := ledgercore.MakeValidatedBlock(blk, vb.Delta())
+ vb = &vb2
+
// add the newly generated block to the ledger
err = node.ledger.AddValidatedBlock(*vb, agreement.Certificate{Round: vb.Block().Round()})
return err
@@ -556,9 +565,9 @@ func (node *AlgorandFullNode) broadcastSignedTxGroup(txgroup []transactions.Sign
// Simulate speculatively runs a transaction group against the current
// blockchain state and returns the effects and/or errors that would result.
-func (node *AlgorandFullNode) Simulate(txgroup []transactions.SignedTxn) (result simulation.Result, err error) {
+func (node *AlgorandFullNode) Simulate(request simulation.Request) (result simulation.Result, err error) {
simulator := simulation.MakeSimulator(node.ledger)
- return simulator.Simulate(txgroup)
+ return simulator.Simulate(request)
}
// ListTxns returns SignedTxns associated with a specific account in a range of Rounds (inclusive).
@@ -660,6 +669,18 @@ func (node *AlgorandFullNode) GetPendingTransaction(txID transactions.Txid) (res
minRound++
}
+ // If we did find the transaction, we know there is no point
+// checking rounds outside its validity window
+ if found {
+ if tx.Txn.FirstValid > minRound {
+ minRound = tx.Txn.FirstValid
+ }
+
+ if tx.Txn.LastValid < maxRound {
+ maxRound = tx.Txn.LastValid
+ }
+ }
+
for r := maxRound; r >= minRound; r-- {
tx, found, err := node.ledger.LookupTxid(txID, r)
if err != nil || !found {
@@ -1208,6 +1229,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo
node.waitMonitoringRoutines()
}()
node.net.ClearHandlers()
+ node.stateProofWorker.Stop()
node.txHandler.Stop()
node.agreementService.Shutdown()
node.catchupService.Stop()
@@ -1233,6 +1255,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo
node.blockService.Start()
node.ledgerService.Start()
node.txHandler.Start()
+ node.stateProofWorker.Start()
// start indexer
if idx, err := node.Indexer(); err == nil {
@@ -1427,3 +1450,22 @@ func (node *AlgorandFullNode) GetSyncRound() uint64 {
// UnsetSyncRound no-ops
func (node *AlgorandFullNode) UnsetSyncRound() {
}
+
+// SetBlockTimeStampOffset sets a timestamp offset in the block header.
+// This is only available in dev mode.
+func (node *AlgorandFullNode) SetBlockTimeStampOffset(offset int64) error {
+ if node.devMode {
+ node.timestampOffset = &offset
+ return nil
+ }
+ return fmt.Errorf("cannot set block timestamp offset when not in dev mode")
+}
+
+// GetBlockTimeStampOffset gets a timestamp offset.
+// This is only available in dev mode.
+func (node *AlgorandFullNode) GetBlockTimeStampOffset() (*int64, error) {
+ if node.devMode {
+ return node.timestampOffset, nil
+ }
+ return nil, fmt.Errorf("cannot get block timestamp offset when not in dev mode")
+}
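
A short sketch of the intended flow, assuming a node with DevMode enabled; the helper name is hypothetical, and the offset feeds into writeDevmodeBlock above, which adds it to the previous block's timestamp subject to the MaxInt64 overflow guard:

    // bumpDevModeClock is a hypothetical helper in package node.
    func bumpDevModeClock(node *AlgorandFullNode) (*int64, error) {
    	// Rejected with an error on non-dev (and follower) nodes.
    	if err := node.SetBlockTimeStampOffset(3600); err != nil {
    		return nil, err
    	}
    	// Subsequent devmode blocks carry prev.TimeStamp + 3600.
    	return node.GetBlockTimeStampOffset()
    }
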
diff --git a/nodecontrol/algodControl.go b/nodecontrol/algodControl.go
index 85c9df30b..3702e2116 100644
--- a/nodecontrol/algodControl.go
+++ b/nodecontrol/algodControl.go
@@ -23,6 +23,7 @@ import (
"os/exec"
"path/filepath"
"strconv"
+ "strings"
"time"
"github.com/algorand/go-algorand/config"
@@ -85,6 +86,13 @@ func (nc NodeController) ServerURL() (url.URL, error) {
if err != nil {
return url.URL{}, err
}
+ if strings.HasPrefix(addr, "http:") || strings.HasPrefix(addr, "https:") {
+ u, err := url.Parse(addr)
+ if err != nil {
+ return url.URL{}, err
+ }
+ return *u, nil
+ }
return url.URL{Scheme: "http", Host: addr}, nil
}
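
The effect of the new prefix check, with assumed example inputs: a bare host:port keeps the legacy http default, while a full URL is now parsed and its scheme preserved:

    // "127.0.0.1:8080"           -> url.URL{Scheme: "http", Host: "127.0.0.1:8080"}  (legacy path)
    // "https://node.example:443" -> url.Parse result, scheme "https" preserved       (new path)
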
diff --git a/protocol/consensus.go b/protocol/consensus.go
index 00857875d..54aa5939d 100644
--- a/protocol/consensus.go
+++ b/protocol/consensus.go
@@ -182,7 +182,7 @@ const ConsensusV33 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/830a4e673148498cc7230a0d1ba1ed0a5471acc6",
)
-// ConsensusV34 enables the TEAL v7 opcodes, stateproofs, shorter lambda.
+// ConsensusV34 enables the TEAL v7 opcodes and state proofs, and shortens lambda to 1.7s.
const ConsensusV34 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/2dd5435993f6f6d65691140f592ebca5ef19ffbd",
)
@@ -192,11 +192,25 @@ const ConsensusV35 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/433d8e9a7274b6fca703d91213e05c7e6a589e69",
)
-// ConsensusV36 adds box storage
+// ConsensusV36 adds box storage in TEAL v8
const ConsensusV36 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/44fa607d6051730f5264526bf3c108d51f0eadb6",
)
+// ConsensusV37 is a technical upgrade, released at the same time as ConsensusV38.
+// It is needed to allow nodes to build up the state necessary to support the state
+// proof related options in ConsensusV38.
+const ConsensusV37 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/1ac4dd1f85470e1fb36c8a65520e1313d7dfed5e",
+)
+
+// ConsensusV38 enables state proof verification using a special tracker,
+// TEAL v9 resources sharing, pre-check ECDSA curve and extra features, and
+// shortens the lambda to 1.5s.
+const ConsensusV38 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/abd3d4823c6f77349fc04c3af7b1e99fe4df699f",
+)
+
// ConsensusFuture is a protocol that should not appear in any production
// network, but is used to test features before they are released.
const ConsensusFuture = ConsensusVersion(
@@ -226,7 +240,7 @@ const ConsensusVAlpha5 = ConsensusVersion("alpha5")
// ConsensusCurrentVersion is the latest version and should be used
// when a specific version is not provided.
-const ConsensusCurrentVersion = ConsensusV36
+const ConsensusCurrentVersion = ConsensusV38
// Error is used to indicate that an unsupported protocol has been detected.
type Error ConsensusVersion
diff --git a/protocol/hash.go b/protocol/hash.go
index 26975a402..434deaf8c 100644
--- a/protocol/hash.go
+++ b/protocol/hash.go
@@ -68,6 +68,7 @@ const (
StateProofMessage HashID = "spm"
StateProofPart HashID = "spp"
StateProofSig HashID = "sps"
+ StateProofVerCtx HashID = "spv"
TestHashable HashID = "TE"
TxGroup HashID = "TG"
diff --git a/scripts/algorand_node_log.json b/scripts/algorand_node_log.json
new file mode 100644
index 000000000..78659dc3a
--- /dev/null
+++ b/scripts/algorand_node_log.json
@@ -0,0 +1,58 @@
+{
+ "algorand_node_log" : {
+ "title" : "Algorand Node Log Format",
+ "description" : "Log format for logrus, used by go-algorand.",
+ "url" : "https://github.com/sirupsen/logrus",
+ "level-field" : "level",
+ "timestamp-field" : "time",
+ "json": true,
+ "body-field": "msg",
+ "line-format" : [
+ { "field" : "time", "timestamp-format": "%b %d %H:%M:%S" },
+ " ",
+ { "field" : "level", "text-transform": "uppercase", "min-width": 4, "max-width": 4, "overflow": "truncate"},
+ " ",
+ "[", { "field": "file" }, ":", {"field": "line"}, "]",
+ " ",
+ { "field": "Context" },
+ " ",
+ { "field" : "msg" },
+ " [", { "field": "function" }, "]"
+ ],
+ "hide-extra": true,
+ "level" : {
+ "info" : "info",
+ "error" : "error",
+ "warning" : "warning",
+ "debug" : "debug"
+ },
+ "value": {
+ "file": {
+ "kind": "string",
+ "identifier": true
+ },
+ "line": {
+ "kind": "integer",
+ "foreign-key": true
+ },
+ "function": {
+ "kind": "string",
+ "identifier": true
+ },
+ "Context": {
+ "kind": "string",
+ "identifier": true
+ }
+ },
+ "sample": [
+ {
+ "line": "{\"file\":\"trackerdbV2.go\",\"function\":\"github.com/algorand/go-algorand/ledger/store.(*trackerDBSchemaInitializer).upgradeDatabaseSchema0\",\"level\":\"info\",\"line\":203,\"msg\":\"upgradeDatabaseSchema0 initializing schema\",\"name\":\"\",\"time\":\"2022-12-29T16:26:58.478738+02:00\"}",
+ "level": "info"
+ },
+ {
+ "line": "{\"Context\":\"sync\",\"details\":{\"StartRound\":0},\"file\":\"telemetry.go\",\"function\":\"github.com/algorand/go-algorand/logging.(*telemetryState).logTelemetry\",\"instanceName\":\"iFepr+AcMdoqEg+2\",\"level\":\"info\",\"line\":261,\"msg\":\"/ApplicationState/CatchupStart\",\"name\":\"\",\"session\":\"\",\"time\":\"2022-12-29T16:26:58.763458+02:00\",\"v\":\"3.14.167910\"}",
+ "level": "info"
+ }
+ ]
+ }
+}
diff --git a/scripts/build_package.sh b/scripts/build_package.sh
index d8f169201..7b538329f 100755
--- a/scripts/build_package.sh
+++ b/scripts/build_package.sh
@@ -106,7 +106,7 @@ TOOLS_ROOT=${PKG_ROOT}/tools
echo "Staging tools package files"
-bin_files=("algons" "coroner" "dispenser" "netgoal" "nodecfg" "pingpong" "cc_service" "cc_agent" "cc_client" "loadgenerator" "COPYING" "dsign" "catchpointdump")
+bin_files=("algons" "coroner" "dispenser" "netgoal" "nodecfg" "pingpong" "cc_service" "cc_agent" "cc_client" "loadgenerator" "COPYING" "dsign" "catchpointdump" "block-generator")
mkdir -p ${TOOLS_ROOT}
for bin in "${bin_files[@]}"; do
cp ${GOPATHBIN}/${bin} ${TOOLS_ROOT}
diff --git a/scripts/configure_dev.sh b/scripts/configure_dev.sh
index 6eb00df3d..df28dd5cc 100755
--- a/scripts/configure_dev.sh
+++ b/scripts/configure_dev.sh
@@ -85,6 +85,9 @@ elif [ "${OS}" = "darwin" ]; then
install_or_upgrade autoconf
install_or_upgrade automake
install_or_upgrade python3
+ install_or_upgrade lnav
+ install_or_upgrade diffutils
+ lnav -i "$SCRIPTPATH/algorand_node_log.json"
fi
elif [ "${OS}" = "windows" ]; then
if ! $msys2 pacman -S --disable-download-timeout --noconfirm git automake autoconf m4 libtool make mingw-w64-x86_64-gcc mingw-w64-x86_64-boost mingw-w64-x86_64-python mingw-w64-x86_64-jq unzip procps; then
diff --git a/scripts/dump_genesis.sh b/scripts/dump_genesis.sh
index 386924588..cf45dd593 100755
--- a/scripts/dump_genesis.sh
+++ b/scripts/dump_genesis.sh
@@ -76,6 +76,9 @@ for LEDGER in $LEDGERS; do
unfinishedcatchpoints)
SORT=round
;;
+ stateproofverification)
+ SORT=lastattestedround
+ ;;
kvstore)
SORT=key
;;
diff --git a/scripts/get_golang_version.sh b/scripts/get_golang_version.sh
index 4e3525a54..7036e716a 100755
--- a/scripts/get_golang_version.sh
+++ b/scripts/get_golang_version.sh
@@ -4,7 +4,7 @@
# and parsed as an array to check against the system's golang version depending
# upon the context in which the project is being built.
#
-# "dev" is to be used to satisfy the minium requirement we have to successfully
+# "dev" is to be used to satisfy the minimum requirement we have to successfully
# build the project.
#
# The default is to return the pinned version needed for our production builds.
diff --git a/scripts/release/mule/Makefile.mule b/scripts/release/mule/Makefile.mule
index ae40490c7..0c73cccf8 100644
--- a/scripts/release/mule/Makefile.mule
+++ b/scripts/release/mule/Makefile.mule
@@ -28,7 +28,7 @@ ci-integration:
SRCROOT=$(SRCPATH) \
test/scripts/e2e.sh -c $(CHANNEL) -n
-ci-build: ci-clean buildsrc ci-setup
+ci-build: ci-clean build ci-setup
CHANNEL=$(CHANNEL) PKG_ROOT=$(PKG_DIR) NO_BUILD=True VARIATIONS=$(OS_TYPE)-$(ARCH) \
scripts/build_packages.sh $(OS_TYPE)/$(ARCH) && \
mkdir -p $(PKG_DIR)/data && \
diff --git a/scripts/travis/codecov b/scripts/travis/codecov
index d443a8979..36513ce06 100755..100644
--- a/scripts/travis/codecov
+++ b/scripts/travis/codecov
@@ -5,7 +5,7 @@
set -e +o pipefail
-VERSION="1.0.3"
+VERSION="1.0.6"
codecov_flags=( )
url="https://codecov.io"
@@ -865,14 +865,17 @@ then
if [ "$GITHUB_HEAD_REF" != "" ];
then
# PR refs are in the format: refs/pull/7/merge
- pr="${GITHUB_REF#refs/pull/}"
- pr="${pr%/merge}"
+ if [[ "$GITHUB_REF" =~ ^refs\/pull\/[0-9]+\/merge$ ]];
+ then
+ pr="${GITHUB_REF#refs/pull/}"
+ pr="${pr%/merge}"
+ fi
branch="${GITHUB_HEAD_REF}"
fi
commit="${GITHUB_SHA}"
slug="${GITHUB_REPOSITORY}"
build="${GITHUB_RUN_ID}"
- build_url=$(urlencode "http://github.com/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}")
+ build_url=$(urlencode "${GITHUB_SERVER_URL:-https://github.com}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}")
job="$(urlencode "${GITHUB_WORKFLOW}")"
# actions/checkout runs in detached HEAD
@@ -987,6 +990,7 @@ else
fi
+say " ${e}current dir: ${x} $PWD"
say " ${e}project root:${x} $git_root"
# find branch, commit, repo from git command
@@ -1618,7 +1622,7 @@ then
# [ or ]
syntax_list='^[[:space:]]*[][][[:space:]]*(//.*)?$'
# func ... {
- syntax_go_func='^[[:space:]]*[func].*[\{][[:space:]]*$'
+ syntax_go_func='^[[:space:]]*func[[:space:]]*[\{][[:space:]]*$'
# shellcheck disable=SC2089
skip_dirs="-not -path '*/$bower_components/*' \
@@ -1783,7 +1787,7 @@ if [ "$dump" != "0" ];
then
# trim whitespace from query
say " ${e}->${x} Dumping upload file (no upload)"
- echo "$url/upload/v4?$(echo "package=$package-$VERSION&token=$token&$query" | tr -d ' ')"
+ echo "$url/upload/v4?$(echo "package=$package-$VERSION&$query" | tr -d ' ')"
cat "$upload_file"
else
if [ "$save_to" != "" ];
@@ -1802,10 +1806,9 @@ else
say " ${e}url:${x} $url"
say " ${e}query:${x} $query"
- # Full query without token (to display on terminal output)
- queryNoToken=$(echo "package=$package-$VERSION&token=secret&$query" | tr -d ' ')
- # now add token to query
+ # Full query (to display on terminal output)
query=$(echo "package=$package-$VERSION&token=$token&$query" | tr -d ' ')
+ queryNoToken=$(echo "package=$package-$VERSION&token=<hidden>&$query" | tr -d ' ')
if [ "$ft_s3" = "1" ];
then
@@ -1817,6 +1820,7 @@ else
-H 'X-Reduced-Redundancy: false' \
-H 'X-Content-Type: application/x-gzip' \
-H 'Content-Length: 0' \
+ -H "X-Upload-Token: ${token}" \
--write-out "\n%{response_code}\n" \
$curlargs \
"$url/upload/v4?$query" || true)
@@ -1863,6 +1867,7 @@ else
-H 'Content-Type: text/plain' \
-H 'Content-Encoding: gzip' \
-H 'X-Content-Encoding: gzip' \
+ -H "X-Upload-Token: ${token}" \
-H 'Accept: text/plain' \
$curlargs \
"$url/upload/v2?$query&attempt=$i" || echo 'HTTP 500')
diff --git a/scripts/travis/codegen_verification.sh b/scripts/travis/codegen_verification.sh
index 9ac11f172..356b8444e 100755
--- a/scripts/travis/codegen_verification.sh
+++ b/scripts/travis/codegen_verification.sh
@@ -61,5 +61,16 @@ else
echo Enlistment is clean
fi
+echo Checking Tidiness...
+make tidy
+if [[ -n $(git status --porcelain) ]]; then
+ echo Dirty after go mod tidy - did you forget to run make tidy?
+ git status -s
+ git --no-pager diff
+ exit 1
+else
+ echo All tidy
+fi
+
# test binary compatibility
"${SCRIPTPATH}/../../test/platform/test_linux_amd64_compatibility.sh"
diff --git a/scripts/travis/upload_coverage.sh b/scripts/travis/upload_coverage.sh
index e861b7ef9..8ab3b56ea 100755
--- a/scripts/travis/upload_coverage.sh
+++ b/scripts/travis/upload_coverage.sh
@@ -1,15 +1,9 @@
#!/usr/bin/env bash
-# Print a warning when there is a new version notification before uploading the
-# coverage report to codecov.
set -eo pipefail
-# Check if there is a new version.
-curl -fLso codecov https://codecov.io/bash
-UPSTREAM_VERSION=$(grep -o 'VERSION=\"[0-9\.]*\"' codecov | cut -d'"' -f2)
-LOCAL_VERSION=$(grep -o 'VERSION=\"[0-9\.]*\"' scripts/travis/codecov | cut -d'"' -f2)
-if [[ "${UPSTREAM_VERSION}" != "${LOCAL_VERSION}" ]]; then
- echo "WARN: version ${UPSTREAM_VERSION} of the codecov upload script is available."
+if [[ -z "$CODECOV_TOKEN" ]]; then
+ /usr/bin/env bash scripts/travis/codecov
+else
+ /usr/bin/env bash scripts/travis/codecov -t "$CODECOV_TOKEN"
fi
-
-/usr/bin/env bash scripts/travis/codecov
diff --git a/scripts/upload_config.sh b/scripts/upload_config.sh
index 7ffaaca20..6ada497d2 100755
--- a/scripts/upload_config.sh
+++ b/scripts/upload_config.sh
@@ -1,5 +1,6 @@
#!/usr/bin/env bash
set -e
+trap 'echo "ERROR: ${BASH_SOURCE}:${LINENO} ${BASH_COMMAND}"' ERR
# upload_config.sh - Archives and uploads a netgoal configuration package from a specified directory
# NOTE: Will only work if you have the required AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY vars set
@@ -33,11 +34,11 @@ SRCPATH=${SCRIPTPATH}/..
export CHANNEL=$2
export FULLVERSION=$($SRCPATH/scripts/compute_build_number.sh -f)
-TEMPDIR=$(mktemp -d 2>/dev/null || mktemp -d -t "tmp")
+TEMPDIR=$(mktemp -d -t "upload_config.tmp.XXXXXX")
TARFILE=${TEMPDIR}/config_${CHANNEL}_${FULLVERSION}.tar.gz
cd $1
-tar -zcf ${TARFILE} * >/dev/null 2>&1
+tar -zcf ${TARFILE} * >/dev/null
${GOPATH}/bin/updater send -s ${TEMPDIR} -c ${CHANNEL} -b "${S3_RELEASE_BUCKET}"
rm ${TARFILE}
diff --git a/stateproof/abstractions.go b/stateproof/abstractions.go
index 8527060ae..552acd0b8 100644
--- a/stateproof/abstractions.go
+++ b/stateproof/abstractions.go
@@ -41,6 +41,8 @@ type Ledger interface {
GenesisHash() crypto.Digest
BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
VotersForStateProof(basics.Round) (*ledgercore.VotersForRound, error)
+ RegisterVotersCommitListener(listener ledgercore.VotersCommitListener)
+ UnregisterVotersCommitListener()
}
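
A sketch of how these hooks are presumably consumed: the Worker below implements OnPrepareVoterCommit, which makes it usable as a ledgercore.VotersCommitListener; the registration site itself is not shown in this diff, so the placement here is an assumption:

    // During worker startup (placement assumed):
    spw.ledger.RegisterVotersCommitListener(spw)
    // ...and symmetrically on shutdown:
    spw.ledger.UnregisterVotersCommitListener()
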
// Network captures the aspects of the gossip network protocol that are
diff --git a/stateproof/builder.go b/stateproof/builder.go
index fbb6813a9..28d5d2b47 100644
--- a/stateproof/builder.go
+++ b/stateproof/builder.go
@@ -20,6 +20,7 @@ import (
"context"
"database/sql"
"encoding/binary"
+ "errors"
"fmt"
"sort"
@@ -27,124 +28,284 @@ import (
"github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/stateproof/verify"
)
-// makeBuilderForRound not threadsafe, should be called in a lock environment
-func (spw *Worker) makeBuilderForRound(rnd basics.Round) (builder, error) {
- l := spw.ledger
- hdr, err := l.BlockHdr(rnd)
+var errVotersNotTracked = errors.New("voters not tracked for the given lookback round")
+
+// spProver captures the state proof cryptographic prover in addition to data needed for
+// signatures aggregation.
+type spProver struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ *stateproof.Prover `codec:"prv"`
+
+ AddrToPos map[Address]uint64 `codec:"addr,allocbound=stateproof.VotersAllocBound"`
+ VotersHdr bookkeeping.BlockHeader `codec:"hdr"`
+ Message stateproofmsg.Message `codec:"msg"`
+}
+
+// OnPrepareVoterCommit is called by the voters tracker when it is preparing to commit rnd. It gives the worker
+// a chance to persist the data it needs.
+func (spw *Worker) OnPrepareVoterCommit(oldBase basics.Round, newBase basics.Round, votersFetcher ledgercore.LedgerForSPBuilder) {
+ for rnd := oldBase + 1; rnd <= newBase; rnd++ {
+ header, err := votersFetcher.BlockHdr(rnd)
+ if err != nil {
+ spw.log.Errorf("OnPrepareVoterCommit(%d): could not fetch round header: %v", rnd, err)
+ continue
+ }
+
+ proto := config.Consensus[header.CurrentProtocol]
+ if proto.StateProofInterval == 0 || uint64(rnd)%proto.StateProofInterval != 0 {
+ continue
+ }
+
+ var proverExists bool
+ err = spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ proverExists, err = proverExistInDB(tx, rnd)
+ return err
+ })
+ if err != nil {
+ spw.log.Warnf("OnPrepareVoterCommit(%d): could not check prover existence, assuming it doesn't exist: %v", rnd, err)
+ } else if proverExists {
+ continue
+ }
+
+ provr, err := createProver(rnd, votersFetcher)
+ if err != nil {
+ if errors.Is(err, errVotersNotTracked) {
+ // There are a few reasons why we might not have voters for a state proof round.
+ //
+ // 1 - When the state proof chain starts, the first round s.t. round % proto.StateProofInterval == 0 will not
+ // have voters (since state proofs were not yet enabled), so we do not create a state proof for it.
+ // e.g. if proto.StateProofInterval == 10 and round == 10, we skip the state proof for that round
+ // (since there are no voters at round 0).
+ //
+ // 2 - When a node uses fast catchup to some round and immediately tries to create a prover.
+ // The node might fail since MaxBalLookback (in the catchpoint) might not be large enough.
+ spw.log.Warnf("OnPrepareVoterCommit(%d): %v", rnd, err)
+ continue
+ }
+
+ spw.log.Errorf("OnPrepareVoterCommit(%d): could not create prover: %v", rnd, err)
+ continue
+ }
+
+ // At this point, the signer might have already created this specific prover
+ // (i.e. it created the prover after proverExistInDB was called and was fast enough to persist it).
+ // In this case we simply overwrite it.
+ err = spw.db.Atomic(func(_ context.Context, tx *sql.Tx) error {
+ return persistProver(tx, rnd, &provr)
+ })
+ if err != nil {
+ spw.log.Errorf("OnPrepareVoterCommit(%d): could not persist prover: %v", rnd, err)
+ }
+ }
+}
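// A minimal illustrative sketch (not part of the change itself) of which rounds
// the loop above considers: within the commit window (oldBase, newBase], only
// rounds that are a multiple of StateProofInterval get a prover.
func stateProofRoundsInWindow(oldBase, newBase, interval uint64) []uint64 {
	var rnds []uint64
	if interval == 0 {
		return rnds // state proofs are disabled
	}
	for rnd := oldBase + 1; rnd <= newBase; rnd++ {
		if rnd%interval == 0 {
			rnds = append(rnds, rnd)
		}
	}
	return rnds
}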
+
+// loadOrCreateProverWithSignatures either loads a state proof prover from the DB or creates a new one.
+// It then fills the prover with all the available signatures.
+func (spw *Worker) loadOrCreateProverWithSignatures(rnd basics.Round) (spProver, error) {
+ b, err := spw.loadOrCreateProver(rnd)
if err != nil {
- return builder{}, err
+ return spProver{}, err
}
- hdrProto := config.Consensus[hdr.CurrentProtocol]
- votersRnd := rnd.SubSaturate(basics.Round(hdrProto.StateProofInterval))
- votersHdr, err := l.BlockHdr(votersRnd)
+ if err := spw.loadSignaturesIntoProver(&b); err != nil {
+ return spProver{}, err
+ }
+ return b, nil
+}
+
+func (spw *Worker) loadOrCreateProver(rnd basics.Round) (spProver, error) {
+ var prover spProver
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ prover, err = getProver(tx, rnd)
+ return err
+ })
+
+ if err == nil {
+ return prover, nil
+ }
+
+ if !errors.Is(err, sql.ErrNoRows) {
+ spw.log.Errorf("loadOrCreateProver: error while fetching builder from DB: %v", err)
+ }
+
+ prover, err = createProver(rnd, spw.ledger)
if err != nil {
- return builder{}, err
+ return spProver{}, err
}
- lookback := votersRnd.SubSaturate(basics.Round(hdrProto.StateProofVotersLookback))
- voters, err := l.VotersForStateProof(lookback)
+ err = spw.db.Atomic(func(_ context.Context, tx *sql.Tx) error {
+ return persistProver(tx, rnd, &prover)
+ })
+
+ // We ignore persistence errors because we still want to try to use our successfully generated prover,
+ // even if, for some reason, persisting it failed.
+ if err != nil {
+ spw.log.Errorf("loadOrCreateProver(%d): failed to insert prover into database: %v", rnd, err)
+ }
+
+ return prover, nil
+}
+
+func (spw *Worker) loadSignaturesIntoProver(prover *spProver) error {
+ var sigs []pendingSig
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ var err2 error
+ sigs, err2 = getPendingSigsForRound(tx, basics.Round(prover.Round))
+ return err2
+ })
+ if err != nil {
+ return err
+ }
+
+ for i := range sigs {
+ err = prover.insertSig(&sigs[i], false)
+ if err != nil {
+ spw.log.Warn(err)
+ }
+ }
+ return nil
+}
+
+func createProver(rnd basics.Round, votersFetcher ledgercore.LedgerForSPBuilder) (spProver, error) {
+ // Since this function might be invoked in the tracker commit context (i.e., from the ledger code),
+ // it is important that we do not use the ledger directly.
+
+ hdr, err := votersFetcher.BlockHdr(rnd)
if err != nil {
- return builder{}, err
+ return spProver{}, err
}
+ hdrProto := config.Consensus[hdr.CurrentProtocol]
+ votersRnd := rnd.SubSaturate(basics.Round(hdrProto.StateProofInterval))
+ lookback := votersRnd.SubSaturate(basics.Round(hdrProto.StateProofVotersLookback))
+ voters, err := votersFetcher.VotersForStateProof(lookback)
+ if err != nil {
+ return spProver{}, err
+ }
if voters == nil {
- // Voters not tracked for that round. Might not be a valid
- // state proof round; state proofs might not be enabled; etc.
- return builder{}, fmt.Errorf("voters not tracked for lookback round %d", lookback)
+ return spProver{}, fmt.Errorf("lookback round %d: %w", lookback, errVotersNotTracked)
}
- msg, err := GenerateStateProofMessage(l, uint64(votersHdr.Round), hdr)
+ votersHdr, err := votersFetcher.BlockHdr(votersRnd)
if err != nil {
- return builder{}, err
+ return spProver{}, err
+ }
+
+ msg, err := GenerateStateProofMessage(votersFetcher, rnd)
+ if err != nil {
+ return spProver{}, err
}
provenWeight, err := verify.GetProvenWeight(&votersHdr, &hdr)
if err != nil {
- return builder{}, err
+ return spProver{}, err
}
- var res builder
- res.votersHdr = votersHdr
- res.voters = voters
- res.message = msg
- res.Builder, err = stateproof.MakeBuilder(msg.Hash(),
- uint64(hdr.Round),
+ var res spProver
+ res.VotersHdr = votersHdr
+ res.AddrToPos = voters.AddrToPos
+ res.Message = msg
+ res.Prover, err = stateproof.MakeProver(msg.Hash(),
+ uint64(rnd),
provenWeight,
voters.Participants,
voters.Tree,
config.Consensus[votersHdr.CurrentProtocol].StateProofStrengthTarget)
if err != nil {
- return builder{}, err
+ return spProver{}, err
}
return res, nil
}
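// Worked example (parameter values assumed for illustration, not taken from
// this change): with StateProofInterval = 256 and StateProofVotersLookback = 16,
// createProver(512) derives votersRnd = 512 - 256 = 256 and fetches the voters
// tracked at lookback round 256 - 16 = 240.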
-func (spw *Worker) initBuilders() {
- spw.mu.Lock()
- defer spw.mu.Unlock()
-
- var roundSigs map[basics.Round][]pendingSig
- err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- roundSigs, err = getPendingSigs(tx)
- return
- })
+func (spw *Worker) initProvers() {
+ spw.provers = make(map[basics.Round]spProver)
+ rnds, err := spw.getAllOnlineProverRounds()
if err != nil {
- spw.log.Warnf("initBuilders: getPendingSigs: %v", err)
+ spw.log.Errorf("initProvers: failed to load rounds: %v", err)
return
}
- for rnd, sigs := range roundSigs {
- if _, ok := spw.builders[rnd]; ok {
- spw.log.Warnf("initBuilders: round %d already present", rnd)
+ for _, rnd := range rnds {
+ if _, ok := spw.provers[rnd]; ok {
+ spw.log.Warnf("initProvers: round %d already present", rnd)
continue
}
- spw.addSigsToBuilder(sigs, rnd)
+
+ prover, err := spw.loadOrCreateProverWithSignatures(rnd)
+ if err != nil {
+ spw.log.Warnf("initProvers: failed to load prover for round %d", rnd)
+ continue
+ }
+ spw.provers[rnd] = prover
}
}
-func (spw *Worker) addSigsToBuilder(sigs []pendingSig, rnd basics.Round) {
- builderForRound, err := spw.makeBuilderForRound(rnd)
+func (spw *Worker) getAllOnlineProverRounds() ([]basics.Round, error) {
+ // Some state proof databases might contain only a signature table. For that reason, when looking for
+ // possible state proof rounds to create provers for, we search the signature table rather than the prover table.
+ latest := spw.ledger.Latest()
+ latestHdr, err := spw.ledger.BlockHdr(latest)
if err != nil {
- spw.log.Warnf("addSigsToBuilder: makeBuilderForRound(%d): %v", rnd, err)
- return
+ return nil, err
+ }
+ proto := config.Consensus[latestHdr.CurrentProtocol]
+ if proto.StateProofInterval == 0 { // StateProofs are not enabled yet
+ return nil, nil
}
- spw.builders[rnd] = builderForRound
- for _, sig := range sigs {
- pos, ok := builderForRound.voters.AddrToPos[sig.signer]
- if !ok {
- spw.log.Warnf("addSigsToBuilder: cannot find %v in round %d", sig.signer, rnd)
- continue
- }
+ latestStateProofRound := latest.RoundDownToMultipleOf(basics.Round(proto.StateProofInterval))
+ threshold := onlineProversThreshold(&proto, latestHdr.StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
- isPresent, err := builderForRound.Present(pos)
- if err != nil {
- spw.log.Warnf("addSigsToBuilder: failed to invoke builderForRound.Present on pos %d - %v", pos, err)
- continue
- }
- if isPresent {
- spw.log.Warnf("addSigsToBuilder: cannot add %v in round %d: position %d already added", sig.signer, rnd, pos)
- continue
- }
+ var rnds []basics.Round
+ err = spw.db.Atomic(func(_ context.Context, tx *sql.Tx) error {
+ var err error
+ rnds, err = getSignatureRounds(tx, threshold, latestStateProofRound)
+ return err
+ })
- if err := builderForRound.IsValid(pos, &sig.sig, false); err != nil {
- spw.log.Warnf("addSigsToBuilder: cannot add %v in round %d: %v", sig.signer, rnd, err)
- continue
- }
- if err := builderForRound.Add(pos, sig.sig); err != nil {
- spw.log.Warnf("addSigsToBuilder: error while adding sig. inner error: %v", err)
- continue
- }
+ return rnds, err
+}
+
+var errAddressNotInVoters = errors.New("cannot find address in builder") // Address was not a part of the voters for this StateProof (top N accounts)
+var errFailedToAddSigAtPos = errors.New("could not add signature to prover") // Position was out of array bounds or signature already present
+var errSigAlreadyPresentAtPos = errors.New("signature already present at this position") // Signature already present at this position
+var errSignatureVerification = errors.New("error while verifying signature") // Signature failed cryptographic verification
+
+func (b *spProver) insertSig(s *pendingSig, verify bool) error {
+ rnd := b.Round
+ pos, ok := b.AddrToPos[s.signer]
+ if !ok {
+ return fmt.Errorf("insertSig: %w (%v not in participants for round %d)", errAddressNotInVoters, s.signer, rnd)
}
+
+ isPresent, err := b.Present(pos)
+ if err != nil {
+ return fmt.Errorf("insertSig: %w (failed to invoke builderForRound.Present on pos %d - %v)", errFailedToAddSigAtPos, pos, err)
+ }
+ if isPresent {
+ return errSigAlreadyPresentAtPos
+ }
+
+ if err = b.IsValid(pos, &s.sig, verify); err != nil {
+ return fmt.Errorf("insertSig: %w (cannot add %v in round %d: %v)", errSignatureVerification, s.signer, rnd, err)
+ }
+ if err = b.Add(pos, s.sig); err != nil {
+ return fmt.Errorf("insertSig: %w (%v)", errFailedToAddSigAtPos, err)
+ }
+
+ return nil
}
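// Illustrative usage (a sketch, not part of this change): callers map the
// sentinel errors above onto network forwarding decisions, as handleSig does below:
//
//	switch err := prover.insertSig(&sig, true); {
//	case errors.Is(err, errSigAlreadyPresentAtPos):
//		// duplicate signature: ignore, but don't penalize the sender
//	case errors.Is(err, errAddressNotInVoters), errors.Is(err, errSignatureVerification):
//		// invalid signer or bad signature: disconnect the sender
//	}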
func (spw *Worker) handleSigMessage(msg network.IncomingMessage) network.OutgoingMessage {
@@ -163,14 +324,31 @@ func (spw *Worker) handleSigMessage(msg network.IncomingMessage) network.Outgoin
return network.OutgoingMessage{Action: fwd}
}
-// handleSig adds a signature to the pending in-memory state proof provers (builders). This function is
+// meetsBroadcastPolicy verifies that the signature's round is either at or below the threshold round or equal to the
+// latest StateProof round.
+// This signature filtering is only relevant when the StateProof chain is stalled and many signatures may be spammed.
+func (spw *Worker) meetsBroadcastPolicy(sfa sigFromAddr, latestRound basics.Round, proto *config.ConsensusParams, stateProofNextRound basics.Round) bool {
+ if sfa.Round <= onlineProversThreshold(proto, stateProofNextRound) {
+ return true
+ }
+
+ latestStateProofRound := latestRound.RoundDownToMultipleOf(basics.Round(proto.StateProofInterval))
+ return sfa.Round == latestStateProofRound
+}
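// Worked example (numbers assumed for illustration): with StateProofInterval = 256,
// a threshold of 1792 and latest round 4200, a signature for round 1536 passes
// (at or below the threshold), a signature for round 4096 passes (4200 rounded
// down to a multiple of 256), and a signature for round 2048 is filtered out.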
+
+// handleSig adds a signature to the pending in-memory state proof provers. This function is
// also responsible for making sure that the signature is valid, and not duplicated.
// if a signature passes all verification it is written into the database.
func (spw *Worker) handleSig(sfa sigFromAddr, sender network.Peer) (network.ForwardingPolicy, error) {
spw.mu.Lock()
defer spw.mu.Unlock()
- builderForRound, ok := spw.builders[sfa.Round]
+ // might happen if the state proof worker is stopping
+ if spw.provers == nil {
+ return network.Ignore, fmt.Errorf("handleSig: no provers loaded")
+ }
+
+ proverForRound, ok := spw.provers[sfa.Round]
if !ok {
latest := spw.ledger.Latest()
latestHdr, err := spw.ledger.BlockHdr(latest)
@@ -178,16 +356,15 @@ func (spw *Worker) handleSig(sfa sigFromAddr, sender network.Peer) (network.Forw
return network.Ignore, err
}
- if sfa.Round < latestHdr.StateProofTracking[protocol.StateProofBasic].StateProofNextRound {
+ stateProofNextRound := latestHdr.StateProofTracking[protocol.StateProofBasic].StateProofNextRound
+
+ if sfa.Round < stateProofNextRound {
// Already have a complete state proof in ledger.
// Ignore this sig.
return network.Ignore, nil
}
- // The sig should be for a round which is a multiple of StateProofInterval
- // using the latestHdr protocol, since changing StateProofInterval is not supported
proto := config.Consensus[latestHdr.CurrentProtocol]
-
// proto.StateProofInterval is not expected to be 0 after passing StateProofNextRound
// checking anyway, otherwise will panic
if proto.StateProofInterval == 0 {
@@ -201,46 +378,52 @@ func (spw *Worker) handleSig(sfa sigFromAddr, sender network.Peer) (network.Forw
sfa.Round, proto.StateProofInterval)
}
- builderForRound, err = spw.makeBuilderForRound(sfa.Round)
+ if sfa.Round > latest {
+ // Avoid a DB inspection in case we haven't reached the round yet.
+ // Don't disconnect the peer, since the signature might have been sent to this node while it recovers.
+ return network.Ignore, fmt.Errorf("handleSig: latest round is smaller than given round %d", sfa.Round)
+ }
+
+ // We want to save the signature in the DB if we know we generated it. However, if the signature's source is
+ // external, we only want to process it if we know for sure it meets our broadcast policy.
+ if sender != nil && !spw.meetsBroadcastPolicy(sfa, latestHdr.Round, &proto, stateProofNextRound) {
+ return network.Ignore, nil
+ }
+
+ proverForRound, err = spw.loadOrCreateProverWithSignatures(sfa.Round)
if err != nil {
// Should not disconnect this peer, since this is a fault of the relay
// The peer could have other signatures what the relay is interested in
return network.Ignore, err
}
- spw.builders[sfa.Round] = builderForRound
+ spw.provers[sfa.Round] = proverForRound
spw.log.Infof("spw.handleSig: starts gathering signatures for round %d", sfa.Round)
}
- pos, ok := builderForRound.voters.AddrToPos[sfa.SignerAddress]
- if !ok {
- return network.Disconnect, fmt.Errorf("handleSig: %v not in participants for %d", sfa.SignerAddress, sfa.Round)
+ sig := pendingSig{
+ signer: sfa.SignerAddress,
+ sig: sfa.Sig,
+ fromThisNode: sender == nil,
}
-
- if isPresent, err := builderForRound.Present(pos); err != nil || isPresent {
- // Signature already part of the builderForRound, ignore.
+ err := proverForRound.insertSig(&sig, true)
+ if errors.Is(err, errSigAlreadyPresentAtPos) {
+ // Safe to ignore this error as it means we already have a valid signature for this address
return network.Ignore, nil
}
-
- if err := builderForRound.IsValid(pos, &sfa.Sig, true); err != nil {
+ if errors.Is(err, errAddressNotInVoters) || errors.Is(err, errSignatureVerification) {
return network.Disconnect, err
}
+ if err != nil { // errFailedToAddSigAtPos and fallback in case of unknown error
+ return network.Ignore, err
+ }
- err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- return addPendingSig(tx, sfa.Round, pendingSig{
- signer: sfa.SignerAddress,
- sig: sfa.Sig,
- fromThisNode: sender == nil,
- })
+ err = spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return addPendingSig(tx, sfa.Round, sig)
})
if err != nil {
return network.Ignore, err
}
- // validated that we can add the sig previously.
- if err := builderForRound.Add(pos, sfa.Sig); err != nil {
- // only Present called from Add returns an error which is already
- // passed in the call above.
- return network.Ignore, err
- }
+
return network.Broadcast, nil
}
@@ -252,48 +435,48 @@ func (spw *Worker) builder(latest basics.Round) {
// will settle for a larger proof. New blocks also tell us
// if a state proof has been committed, so that we can stop trying
// to build it.
+
+ nextBroadcastRnd := latest
for {
spw.tryBroadcast()
- nextrnd := latest + 1
select {
case <-spw.ctx.Done():
spw.wg.Done()
return
- case <-spw.ledger.Wait(nextrnd):
+ case <-spw.ledger.Wait(nextBroadcastRnd + 1):
// Continue on
}
- // See if any new state proofs were formed, according to
- // the new block, which would mean we can clean up some builders.
- hdr, err := spw.ledger.BlockHdr(nextrnd)
+ newLatest := spw.ledger.Latest()
+ newLatestHdr, err := spw.ledger.BlockHdr(newLatest)
+
if err != nil {
- spw.log.Warnf("spw.builder: BlockHdr(%d): %v", nextrnd, err)
+ spw.log.Warnf("spw.builder: BlockHdr(%d): %v", newLatest, err)
continue
}
- spw.deleteOldSigs(&hdr)
- spw.deleteOldBuilders(&hdr)
+ proto := config.Consensus[newLatestHdr.CurrentProtocol]
+ stateProofNextRound := newLatestHdr.StateProofTracking[protocol.StateProofBasic].StateProofNextRound
+
+ spw.deleteProverData(&proto, stateProofNextRound)
// Broadcast signatures based on the previous block(s) that
// were agreed upon. This ensures that, if we send a signature
// for block R, nodes will have already verified block R, because
// block R+1 has been formed.
- proto := config.Consensus[hdr.CurrentProtocol]
- newLatest := spw.ledger.Latest()
- for r := latest; r < newLatest; r++ {
+ for r := nextBroadcastRnd; r < newLatest; r++ {
// Wait for the signer to catch up; mostly relevant in tests.
spw.waitForSignature(r)
-
- spw.broadcastSigs(r, proto)
+ spw.broadcastSigs(r, stateProofNextRound, proto)
}
- latest = newLatest
+ nextBroadcastRnd = newLatest
}
}
// broadcastSigs periodically broadcasts pending signatures for rounds
-// that have not been able to form a state proof.
+// that have not yet been able to form a state proof, bounded by proversCacheLength.
//
// Signature re-broadcasting happens in periods of proto.StateProofInterval
// rounds.
@@ -306,7 +489,7 @@ func (spw *Worker) builder(latest basics.Round) {
//
// The broadcast schedule is randomized by the address of the block signer,
// for load-balancing over time.
-func (spw *Worker) broadcastSigs(brnd basics.Round, proto config.ConsensusParams) {
+func (spw *Worker) broadcastSigs(brnd basics.Round, stateProofNextRound basics.Round, proto config.ConsensusParams) {
if proto.StateProofInterval == 0 {
return
}
@@ -314,12 +497,14 @@ func (spw *Worker) broadcastSigs(brnd basics.Round, proto config.ConsensusParams
spw.mu.Lock()
defer spw.mu.Unlock()
+ latestStateProofRound := brnd.RoundDownToMultipleOf(basics.Round(proto.StateProofInterval))
+ threshold := onlineProversThreshold(&proto, stateProofNextRound)
var roundSigs map[basics.Round][]pendingSig
err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
if brnd%basics.Round(proto.StateProofInterval) < basics.Round(proto.StateProofInterval/2) {
- roundSigs, err = getPendingSigsFromThisNode(tx)
+ roundSigs, err = getPendingSigs(tx, threshold, latestStateProofRound, true)
} else {
- roundSigs, err = getPendingSigs(tx)
+ roundSigs, err = getPendingSigs(tx, threshold, latestStateProofRound, false)
}
return
})
@@ -357,26 +542,97 @@ func (spw *Worker) broadcastSigs(brnd basics.Round, proto config.ConsensusParams
}
}
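// Worked example (interval assumed for illustration): with StateProofInterval = 256,
// the first half of each period (brnd % 256 < 128) re-broadcasts only this node's
// own signatures (from_this_node = 1), while the second half re-broadcasts all
// pending signatures returned by getPendingSigs.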
-func (spw *Worker) deleteOldSigs(currentHdr *bookkeeping.BlockHeader) {
- oldestRoundToRemove := GetOldestExpectedStateProof(currentHdr)
+func (spw *Worker) deleteProverData(proto *config.ConsensusParams, stateProofNextRound basics.Round) {
+ if proto.StateProofInterval == 0 || stateProofNextRound == 0 {
+ return
+ }
+
+ // Delete from memory (already stored on disk). Practically, there are two scenarios where provers get removed from memory:
+ // 1. When a state proof is committed, the earliest prover gets removed from memory, and later on from disk
+ // (when calling deleteStaleProver).
+ // 2. If state proofs are stalled while consensus is moving forward, a new latest prover will be created and
+ // the older provers will be swapped out of memory (i.e., removed from memory but kept on disk).
+ spw.trimProversCache(proto, stateProofNextRound)
+
+ if spw.lastCleanupRound == stateProofNextRound {
+ return
+ }
+
+ // Delete from disk (database)
+ spw.deleteStaleSigs(stateProofNextRound)
+ spw.deleteStaleKeys(stateProofNextRound)
+ spw.deleteStaleProver(stateProofNextRound)
+ spw.lastCleanupRound = stateProofNextRound
+}
+func (spw *Worker) deleteStaleSigs(retainRound basics.Round) {
err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- return deletePendingSigsBeforeRound(tx, oldestRoundToRemove)
+ return deletePendingSigsBeforeRound(tx, retainRound)
})
if err != nil {
- spw.log.Warnf("deletePendingSigsBeforeRound(%d): %v", oldestRoundToRemove, err)
+ spw.log.Warnf("deleteStaleSigs(%d): %v", retainRound, err)
}
}
-func (spw *Worker) deleteOldBuilders(currentHdr *bookkeeping.BlockHeader) {
- oldestRoundToRemove := GetOldestExpectedStateProof(currentHdr)
+func (spw *Worker) deleteStaleKeys(retainRound basics.Round) {
+ keys := spw.accts.StateProofKeys(retainRound)
+ for _, key := range keys {
+ firstRoundAtKeyLifeTime, err := key.StateProofSecrets.FirstRoundInKeyLifetime()
+ if err != nil {
+ spw.log.Errorf("deleteStaleKeys: could not calculate keylifetime for account %v on round %d: %v", key.ParticipationID, firstRoundAtKeyLifeTime, err)
+ continue
+ }
+ err = spw.accts.DeleteStateProofKey(key.ParticipationID, basics.Round(firstRoundAtKeyLifeTime))
+ if err != nil {
+ spw.log.Warnf("deleteStaleKeys: could not remove key for account %v on round %d: %v", key.ParticipationID, firstRoundAtKeyLifeTime, err)
+ }
+ }
+}
+func (spw *Worker) deleteStaleProver(retainRound basics.Round) {
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return deleteProvers(tx, retainRound)
+ })
+ if err != nil {
+ spw.log.Warnf("deleteStaleProver: failed to delete provers from database: %v", err)
+ }
+}
+
+// onlineProversThreshold returns the highest round for which the prover should be stored in memory (cache).
+// This is mostly relevant in case the StateProof chain is stalled.
+// The threshold is also used to limit the StateProof signatures broadcasted over the network.
+func onlineProversThreshold(proto *config.ConsensusParams, stateProofNextRound basics.Round) basics.Round {
+ /*
+ proversCacheLength - 2:
+ let proversCacheLength <- 5, StateProofNextRound <- 1024, LatestRound <- 4096
+ threshold = StateProofNextRound + 3 * StateProofInterval (for a total of 4 early StateProofs)
+ the 5th prover in the cache is reserved for the LatestRound stateproof.
+ */
+ threshold := stateProofNextRound + basics.Round((proversCacheLength-2)*proto.StateProofInterval)
+ return threshold
+}
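// A minimal worked example, assuming proversCacheLength = 5 (as in the comment
// above), StateProofInterval = 256 and StateProofNextRound = 1024:
//
//	threshold = 1024 + (5-2)*256 = 1792
//
// so provers for the pending state proof rounds 1024, 1280, 1536 and 1792 stay
// in memory, and the remaining cache slot is reserved for the latest round.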
+
+// trimProversCache reduces the provers stored in memory to the X earliest plus the single latest, for a total of X+1 provers.
+func (spw *Worker) trimProversCache(proto *config.ConsensusParams, stateProofNextRound basics.Round) {
spw.mu.Lock()
defer spw.mu.Unlock()
- for rnd := range spw.builders {
- if rnd < oldestRoundToRemove {
- delete(spw.builders, rnd)
+ var maxProverRound basics.Round
+ for rnd := range spw.provers {
+ if rnd > maxProverRound {
+ maxProverRound = rnd
+ }
+ }
+
+ threshold := onlineProversThreshold(proto, stateProofNextRound)
+ /*
+ For example, provers currently stored in memory are for these rounds:
+ [..., StateProofNextRound-256, StateProofNextRound, StateProofNextRound+256, ..., Threshold, ..., maxProverRound]
+ [StateProofNextRound, ..., Threshold, maxProverRound] <- Only provers that should be stored in memory after trim
+ */
+ for rnd := range spw.provers {
+ if rnd < stateProofNextRound || (threshold < rnd && rnd < maxProverRound) {
+ delete(spw.provers, rnd)
}
}
}
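// The retention predicate used above, extracted as a sketch for clarity (not
// part of this change): a prover survives the trim iff its round is at least
// stateProofNextRound and is either at most the threshold or the newest round.
func shouldRetainProver(rnd, nextRound, threshold, maxProverRound basics.Round) bool {
	if rnd < nextRound {
		return false // already covered by a committed state proof
	}
	return rnd <= threshold || rnd == maxProverRound
}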
@@ -385,30 +641,38 @@ func (spw *Worker) tryBroadcast() {
spw.mu.Lock()
defer spw.mu.Unlock()
- sortedRounds := make([]basics.Round, 0, len(spw.builders))
- for rnd := range spw.builders {
+ sortedRounds := make([]basics.Round, 0, len(spw.provers))
+ for rnd := range spw.provers {
sortedRounds = append(sortedRounds, rnd)
}
sort.Slice(sortedRounds, func(i, j int) bool { return sortedRounds[i] < sortedRounds[j] })
- for _, rnd := range sortedRounds { // Iterate over the builders in a sequential manner
- b := spw.builders[rnd]
+ for _, rnd := range sortedRounds {
+ // Iterate over the provers in round order. If the earliest state proof is not ready or was rejected,
+ // it won't be possible to add a later one. For that reason, we break out of the loop.
+ b := spw.provers[rnd]
firstValid := spw.ledger.Latest()
- acceptableWeight := verify.AcceptableStateProofWeight(&b.votersHdr, firstValid, logging.Base())
+ acceptableWeight := verify.AcceptableStateProofWeight(&b.VotersHdr, firstValid, logging.Base())
if b.SignedWeight() < acceptableWeight {
// Haven't signed enough to build the state proof at this time.
- continue
+ break
}
if !b.Ready() {
// Haven't gotten enough signatures to get past ProvenWeight
- continue
+ break
}
- sp, err := b.Build()
+ sp, err := b.CreateProof()
if err != nil {
spw.log.Warnf("spw.tryBroadcast: building state proof for %d failed: %v", rnd, err)
- continue
+ break
+ }
+
+ latestHeader, err := spw.ledger.BlockHdr(firstValid)
+ if err != nil {
+ spw.log.Warnf("spw.tryBroadcast: could not fetch block header for round %d failed: %v", firstValid, err)
+ break
}
spw.log.Infof("spw.tryBroadcast: building state proof transaction for round %d", rnd)
@@ -416,11 +680,11 @@ func (spw *Worker) tryBroadcast() {
stxn.Txn.Type = protocol.StateProofTx
stxn.Txn.Sender = transactions.StateProofSender
stxn.Txn.FirstValid = firstValid
- stxn.Txn.LastValid = firstValid + basics.Round(b.voters.Proto.MaxTxnLife)
+ stxn.Txn.LastValid = firstValid + basics.Round(config.Consensus[latestHeader.CurrentProtocol].MaxTxnLife)
stxn.Txn.GenesisHash = spw.ledger.GenesisHash()
stxn.Txn.StateProofTxnFields.StateProofType = protocol.StateProofBasic
stxn.Txn.StateProofTxnFields.StateProof = *sp
- stxn.Txn.StateProofTxnFields.Message = b.message
+ stxn.Txn.StateProofTxnFields.Message = b.Message
err = spw.txnSender.BroadcastInternalSignedTxGroup([]transactions.SignedTxn{stxn})
if err != nil {
spw.log.Warnf("spw.tryBroadcast: broadcasting state proof txn for %d: %v", rnd, err)
@@ -432,9 +696,9 @@ func (spw *Worker) tryBroadcast() {
}
func (spw *Worker) invokeBuilder(r basics.Round) {
- spw.mu.Lock()
+ spw.signedMu.Lock()
spw.signed = r
- spw.mu.Unlock()
+ spw.signedMu.Unlock()
select {
case spw.signedCh <- struct{}{}:
@@ -443,8 +707,8 @@ func (spw *Worker) invokeBuilder(r basics.Round) {
}
func (spw *Worker) lastSignedBlock() basics.Round {
- spw.mu.Lock()
- defer spw.mu.Unlock()
+ spw.signedMu.RLock()
+ defer spw.signedMu.RUnlock()
return spw.signed
}
diff --git a/stateproof/db.go b/stateproof/db.go
index b1883f591..f0b0e5cc1 100644
--- a/stateproof/db.go
+++ b/stateproof/db.go
@@ -17,15 +17,18 @@
package stateproof
import (
+ "context"
"database/sql"
"fmt"
"github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/db"
)
-var schema = []string{
+const (
// sigs tracks signatures used to build a state proof, for
// rounds that have not formed a state proof yet.
//
@@ -35,33 +38,68 @@ var schema = []string{
//
// Signatures produced by this node are special because we broadcast
// them early; other signatures are retransmitted later on.
- `CREATE TABLE IF NOT EXISTS sigs (
+ createSigsTable = `CREATE TABLE IF NOT EXISTS sigs (
sprnd integer,
signer blob,
sig blob,
from_this_node integer,
- UNIQUE (sprnd, signer))`,
+ UNIQUE (sprnd, signer))`
- `CREATE INDEX IF NOT EXISTS sigs_from_this_node ON sigs (from_this_node)`,
+ createSigsIdx = `CREATE INDEX IF NOT EXISTS sigs_from_this_node ON sigs (from_this_node)`
+
+ // the provers table stores a serialization of each round's prover data, without the sigs (stored separately)
+ createProverTable = `CREATE TABLE IF NOT EXISTS provers (
+ round INTEGER PRIMARY KEY NOT NULL,
+ prover BLOB NOT NULL
+ )`
+
+ insertOrReplaceProverForRound = `INSERT OR REPLACE INTO provers (round,prover) VALUES (?,?)`
+
+ selectProverForRound = `SELECT prover FROM provers WHERE round=?`
+
+ deleteProverForRound = `DELETE FROM provers WHERE round<?`
+)
+
+// dbSchemaUpgrade0 initializes the tables.
+func dbSchemaUpgrade0(_ context.Context, tx *sql.Tx, _ bool) error {
+ _, err := tx.Exec(createSigsTable)
+ if err != nil {
+ return err
+ }
+
+ _, err = tx.Exec(createSigsIdx)
+
+ return err
}
-type pendingSig struct {
- signer basics.Address
- sig merklesignature.Signature
- fromThisNode bool
+func dbSchemaUpgrade1(_ context.Context, tx *sql.Tx, _ bool) error {
+ _, err := tx.Exec(createProverTable)
+
+ return err
}
-func initDB(tx *sql.Tx) error {
- for i, tableCreate := range schema {
- _, err := tx.Exec(tableCreate)
- if err != nil {
- return fmt.Errorf("could not state proof table %d: %v", i, err)
- }
+func makeStateProofDB(accessor db.Accessor) error {
+ migrations := []db.Migration{
+ dbSchemaUpgrade0,
+ dbSchemaUpgrade1,
+ }
+
+ err := db.Initialize(accessor, migrations)
+ if err != nil {
+ return fmt.Errorf("unable to initialize participation registry database: %w", err)
}
return nil
}
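// Hypothetical sketch (not part of this change): the comment on getMessage below
// notes that state proof messages may eventually get their own table. Under the
// migration scheme above, that would be a third entry appended to the migrations
// slice; db.Initialize is expected to run only the migrations beyond the schema
// version already recorded in the database.
func dbSchemaUpgrade2(_ context.Context, tx *sql.Tx, _ bool) error {
	_, err := tx.Exec(`CREATE TABLE IF NOT EXISTS messages (
		round INTEGER PRIMARY KEY NOT NULL,
		message BLOB NOT NULL
	)`)
	return err
}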
+//#region Sig Operations
+
+type pendingSig struct {
+ signer basics.Address
+ sig merklesignature.Signature
+ fromThisNode bool
+}
+
func addPendingSig(tx *sql.Tx, rnd basics.Round, psig pendingSig) error {
_, err := tx.Exec("INSERT INTO sigs (sprnd, signer, sig, from_this_node) VALUES (?, ?, ?, ?)",
rnd,
@@ -76,8 +114,15 @@ func deletePendingSigsBeforeRound(tx *sql.Tx, rnd basics.Round) error {
return err
}
-func getPendingSigs(tx *sql.Tx) (map[basics.Round][]pendingSig, error) {
- rows, err := tx.Query("SELECT sprnd, signer, sig, from_this_node FROM sigs")
+// getPendingSigs returns pending sigs up to the threshold round.
+// Sigs for the max round (which might be higher than the threshold) are also included.
+func getPendingSigs(tx *sql.Tx, threshold basics.Round, maxRound basics.Round, onlyFromThisNode bool) (map[basics.Round][]pendingSig, error) {
+ query := "SELECT sprnd, signer, sig, from_this_node FROM sigs WHERE (sprnd<=? OR sprnd=?)"
+ if onlyFromThisNode {
+ query += " AND from_this_node=1"
+ }
+
+ rows, err := tx.Query(query, threshold, maxRound)
if err != nil {
return nil, err
}
@@ -86,14 +131,28 @@ func getPendingSigs(tx *sql.Tx) (map[basics.Round][]pendingSig, error) {
return rowsToPendingSigs(rows)
}
-func getPendingSigsFromThisNode(tx *sql.Tx) (map[basics.Round][]pendingSig, error) {
- rows, err := tx.Query("SELECT sprnd, signer, sig, from_this_node FROM sigs WHERE from_this_node=1")
+func getPendingSigsForRound(tx *sql.Tx, rnd basics.Round) ([]pendingSig, error) {
+ rows, err := tx.Query("SELECT sprnd, signer, sig, from_this_node FROM sigs WHERE sprnd=?", rnd)
if err != nil {
return nil, err
}
defer rows.Close()
+ tmpmap, err := rowsToPendingSigs(rows)
+ if err != nil {
+ return nil, err
+ }
+ return tmpmap[rnd], nil
+}
- return rowsToPendingSigs(rows)
+func sigExistsInDB(tx *sql.Tx, rnd basics.Round, account Address) (bool, error) {
+ row := tx.QueryRow("SELECT EXISTS ( SELECT 1 FROM sigs WHERE signer=? AND sprnd=?)", account[:], rnd)
+
+ exists := 0
+ if err := row.Scan(&exists); err != nil {
+ return false, err
+ }
+
+ return exists != 0, nil
}
func rowsToPendingSigs(rows *sql.Rows) (map[basics.Round][]pendingSig, error) {
@@ -121,3 +180,90 @@ func rowsToPendingSigs(rows *sql.Rows) (map[basics.Round][]pendingSig, error) {
return res, rows.Err()
}
+
+//#endregion
+
+//#region Prover Operations
+func persistProver(tx *sql.Tx, rnd basics.Round, b *spProver) error {
+ _, err := tx.Exec(insertOrReplaceProverForRound, rnd, protocol.Encode(b))
+ return err
+}
+
+func getProver(tx *sql.Tx, rnd basics.Round) (spProver, error) {
+ row := tx.QueryRow(selectProverForRound, rnd)
+ var rawProver []byte
+ err := row.Scan(&rawProver)
+ if err != nil {
+ return spProver{}, fmt.Errorf("getProver: prover for round %d not found in the database: %w", rnd, err)
+ }
+ var bldr spProver
+ err = protocol.Decode(rawProver, &bldr)
+ if err != nil {
+ return spProver{}, fmt.Errorf("getProver: prover for round %d failed to decode: %w", rnd, err)
+ }
+
+ // Stored Prover is corrupted...
+ if bldr.Prover == nil {
+ return spProver{}, fmt.Errorf("getProver: prover for round %d is corrupted", rnd)
+ }
+
+ bldr.Prover.AllocSigs()
+
+ return bldr, nil
+}
+
+// getMessage fetches only the StateProof message from within the prover stored on disk.
+// In the future, StateProof messages should perhaps be stored in their own table, at which point this implementation will change.
+func getMessage(tx *sql.Tx, rnd basics.Round) (stateproofmsg.Message, error) {
+ row := tx.QueryRow(selectProverForRound, rnd)
+ var rawProver []byte
+ err := row.Scan(&rawProver)
+ if err != nil {
+ return stateproofmsg.Message{}, fmt.Errorf("getMessage: prover for round %d not found in the database: %w", rnd, err)
+ }
+ var bldr spProver
+ err = protocol.Decode(rawProver, &bldr)
+ if err != nil {
+ return stateproofmsg.Message{}, fmt.Errorf("getMessage: prover for round %d failed to decode: %w", rnd, err)
+ }
+
+ return bldr.Message, nil
+}
+
+func proverExistInDB(tx *sql.Tx, rnd basics.Round) (bool, error) {
+ row := tx.QueryRow("SELECT EXISTS ( SELECT 1 FROM provers WHERE round=? )", rnd)
+
+ exists := 0
+ if err := row.Scan(&exists); err != nil {
+ return false, err
+ }
+
+ return exists != 0, nil
+}
+
+// deleteProvers deletes all provers before (but not including) the given rnd
+func deleteProvers(tx *sql.Tx, rnd basics.Round) error {
+ _, err := tx.Exec(deleteProverForRound, rnd)
+ return err
+}
+
+func getSignatureRounds(tx *sql.Tx, threshold basics.Round, maxRound basics.Round) ([]basics.Round, error) {
+ var rnds []basics.Round
+ rows, err := tx.Query("SELECT DISTINCT sprnd FROM sigs WHERE (sprnd<=? OR sprnd=?)", threshold, maxRound)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ var rnd basics.Round
+ for rows.Next() {
+ err := rows.Scan(&rnd)
+ if err != nil {
+ return nil, err
+ }
+ rnds = append(rnds, rnd)
+ }
+ return rnds, nil
+}
+
+//#endregion
diff --git a/stateproof/db_test.go b/stateproof/db_test.go
index 0de517895..5fc136554 100644
--- a/stateproof/db_test.go
+++ b/stateproof/db_test.go
@@ -26,6 +26,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/test/partitiontest"
@@ -48,15 +49,56 @@ func dbOpenTest(t testing.TB, inMemory bool) (db.Pair, string) {
return dbOpenTestRand(t, inMemory, crypto.RandUint64())
}
+func TestDbSchemaUpgrade1(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ dbs, _ := dbOpenTest(t, true)
+ defer dbs.Close()
+
+ migrations := []db.Migration{
+ dbSchemaUpgrade0,
+ dbSchemaUpgrade1,
+ }
+
+ a.NoError(db.Initialize(dbs.Wdb, migrations[:1]))
+
+ // perform a request on the sigs table.
+ a.NoError(dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ var psig pendingSig
+ crypto.RandBytes(psig.signer[:])
+ return addPendingSig(tx, 0, psig)
+ }))
+
+ p := spProver{Prover: &stateproof.Prover{}}
+ p.ProvenWeight = 5
+ a.ErrorContains(dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return persistProver(tx, 0, &p)
+ }), "no such table: provers")
+
+ // migrating the DB to the next version.
+ a.NoError(makeStateProofDB(dbs.Wdb))
+
+ a.NoError(dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return persistProver(tx, 0, &p)
+ }))
+
+ var p2 spProver
+ a.NoError(dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ var err error
+ p2, err = getProver(tx, 0)
+ return err
+ }))
+ a.Equal(p.ProverPersistedFields, p2.ProverPersistedFields)
+}
+
func TestPendingSigDB(t *testing.T) {
partitiontest.PartitionTest(t)
dbs, _ := dbOpenTest(t, true)
defer dbs.Close()
- err := dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- return initDB(tx)
- })
+ err := makeStateProofDB(dbs.Wdb)
require.NoError(t, err)
for r := basics.Round(0); r < basics.Round(100); r++ {
@@ -88,12 +130,12 @@ func TestPendingSigDB(t *testing.T) {
var psigsThis map[basics.Round][]pendingSig
err = dbs.Rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
var err error
- psigs, err = getPendingSigs(tx)
+ psigs, err = getPendingSigs(tx, basics.Round(100), basics.Round(100), false)
if err != nil {
return err
}
- psigsThis, err = getPendingSigsFromThisNode(tx)
+ psigsThis, err = getPendingSigs(tx, basics.Round(100), basics.Round(100), true)
if err != nil {
return err
}
@@ -117,3 +159,146 @@ func TestPendingSigDB(t *testing.T) {
}
}
}
+
+func TestSigExistQuery(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ dbs, _ := dbOpenTest(t, true)
+ defer dbs.Close()
+
+ require.NoError(t, makeStateProofDB(dbs.Wdb))
+
+ n := 8
+ var accts []basics.Address
+ // setup:
+ for r := basics.Round(0); r < basics.Round(n); r++ {
+ var psig pendingSig
+ crypto.RandBytes(psig.signer[:])
+ accts = append(accts, psig.signer)
+
+ require.NoError(t, dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return addPendingSig(tx, r, psig)
+ }))
+ }
+
+ // all addresses have signed the message, so sigExistsInDB should return true:
+ for r := basics.Round(0); r < basics.Round(n/2); r++ {
+ require.NoError(t, dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ exists, err := sigExistsInDB(tx, r, accts[r])
+ require.NoError(t, err)
+ require.True(t, exists)
+ return nil
+ }))
+ }
+
+ // a "wrongAddress" should not have signatures in the dabase
+ require.NoError(t, dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ wrongAddress := accts[0]
+ var actCopy basics.Address
+ copy(actCopy[:], wrongAddress[:])
+ actCopy[0]++
+ exists, err := sigExistsInDB(tx, 0, actCopy)
+ require.NoError(t, err)
+ require.False(t, exists)
+ return nil
+ }))
+
+ require.NoError(t, dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return deletePendingSigsBeforeRound(tx, basics.Round(n))
+ }))
+
+ for r := basics.Round(n / 2); r < basics.Round(n); r++ {
+ require.NoError(t, dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ exists, err := sigExistsInDB(tx, r, accts[r])
+ require.NoError(t, err)
+ require.False(t, exists)
+ return nil
+ }))
+ }
+}
+
+func TestProversDB(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ dbs, _ := dbOpenTest(t, true)
+ defer dbs.Close()
+ err := makeStateProofDB(dbs.Wdb)
+ a.NoError(err)
+
+ provers := make([]spProver, 100)
+ for i := uint64(0); i < 100; i++ {
+ var prover spProver
+ prover.Prover = &stateproof.Prover{}
+ prover.Round = i
+ provers[i] = prover
+
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return persistProver(tx, basics.Round(i), &provers[i])
+ })
+ a.NoError(err)
+ }
+
+ var count int
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ err = tx.QueryRow("SELECT count(1) FROM provers").Scan(&count)
+ return err
+ })
+ a.NoError(err)
+ a.Equal(100, count)
+
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return deleteProvers(tx, basics.Round(35))
+ })
+ a.NoError(err)
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ err = tx.QueryRow("SELECT count(1) FROM provers").Scan(&count)
+ return err
+ })
+ a.NoError(err)
+ a.Equal(100-35, count)
+
+ var prover spProver
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ prover, err = getProver(tx, basics.Round(34))
+ return err
+ })
+ a.ErrorIs(err, sql.ErrNoRows)
+
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ prover, err = getProver(tx, basics.Round(35))
+ return err
+ })
+ a.NoError(err)
+ a.Equal(uint64(35), prover.Round)
+}
+
+func TestDbProverAlreadyExists(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ dbs, _ := dbOpenTest(t, true)
+ defer dbs.Close()
+ err := makeStateProofDB(dbs.Wdb)
+ a.NoError(err)
+
+ var prover spProver
+ var outProv spProver
+
+ prover.Prover = &stateproof.Prover{}
+ prover.Round = 2
+ prover.Data[3] = 5
+
+ for i := 0; i < 2; i++ {
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ return persistProver(tx, basics.Round(2), &prover)
+ })
+ a.NoError(err)
+ err = dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ outProv, err = getProver(tx, basics.Round(2))
+ return err
+ })
+ a.NoError(err)
+ a.Equal(prover.ProverPersistedFields, outProv.ProverPersistedFields)
+ }
+}
diff --git a/stateproof/msgp_gen.go b/stateproof/msgp_gen.go
index 75abd4aa7..db80b731a 100644
--- a/stateproof/msgp_gen.go
+++ b/stateproof/msgp_gen.go
@@ -3,7 +3,11 @@ package stateproof
// Code generated by github.com/algorand/msgp DO NOT EDIT.
import (
+ "sort"
+
"github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/crypto/stateproof"
)
// The following msgp objects are implemented in this file:
@@ -15,6 +19,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// spProver
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// MarshalMsg implements msgp.Marshaler
func (z *sigFromAddr) MarshalMsg(b []byte) (o []byte) {
@@ -167,3 +179,289 @@ func (z *sigFromAddr) Msgsize() (s int) {
func (z *sigFromAddr) MsgIsZero() bool {
return ((*z).SignerAddress.MsgIsZero()) && ((*z).Round.MsgIsZero()) && ((*z).Sig.MsgIsZero())
}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *spProver) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0003Len := uint32(4)
+ var zb0003Mask uint8 /* 5 bits */
+ if len((*z).AddrToPos) == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x2
+ }
+ if (*z).VotersHdr.MsgIsZero() {
+ zb0003Len--
+ zb0003Mask |= 0x4
+ }
+ if (*z).Message.MsgIsZero() {
+ zb0003Len--
+ zb0003Mask |= 0x8
+ }
+ if (*z).Prover == nil {
+ zb0003Len--
+ zb0003Mask |= 0x10
+ }
+ // variable map header, size zb0003Len
+ o = append(o, 0x80|uint8(zb0003Len))
+ if zb0003Len != 0 {
+ if (zb0003Mask & 0x2) == 0 { // if not empty
+ // string "addr"
+ o = append(o, 0xa4, 0x61, 0x64, 0x64, 0x72)
+ if (*z).AddrToPos == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).AddrToPos)))
+ }
+ zb0001_keys := make([]Address, 0, len((*z).AddrToPos))
+ for zb0001 := range (*z).AddrToPos {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortAddress(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).AddrToPos[zb0001]
+ _ = zb0002
+ o = zb0001.MarshalMsg(o)
+ o = msgp.AppendUint64(o, zb0002)
+ }
+ }
+ if (zb0003Mask & 0x4) == 0 { // if not empty
+ // string "hdr"
+ o = append(o, 0xa3, 0x68, 0x64, 0x72)
+ o = (*z).VotersHdr.MarshalMsg(o)
+ }
+ if (zb0003Mask & 0x8) == 0 { // if not empty
+ // string "msg"
+ o = append(o, 0xa3, 0x6d, 0x73, 0x67)
+ o = (*z).Message.MarshalMsg(o)
+ }
+ if (zb0003Mask & 0x10) == 0 { // if not empty
+ // string "prv"
+ o = append(o, 0xa3, 0x70, 0x72, 0x76)
+ if (*z).Prover == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = (*z).Prover.MarshalMsg(o)
+ }
+ }
+ }
+ return
+}
+
+func (_ *spProver) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*spProver)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *spProver) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Prover = nil
+ } else {
+ if (*z).Prover == nil {
+ (*z).Prover = new(stateproof.Prover)
+ }
+ bts, err = (*z).Prover.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Prover")
+ return
+ }
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AddrToPos")
+ return
+ }
+ if zb0005 > stateproof.VotersAllocBound {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(stateproof.VotersAllocBound))
+ err = msgp.WrapError(err, "struct-from-array", "AddrToPos")
+ return
+ }
+ if zb0006 {
+ (*z).AddrToPos = nil
+ } else if (*z).AddrToPos == nil {
+ (*z).AddrToPos = make(map[Address]uint64, zb0005)
+ }
+ for zb0005 > 0 {
+ var zb0001 Address
+ var zb0002 uint64
+ zb0005--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AddrToPos")
+ return
+ }
+ zb0002, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AddrToPos", zb0001)
+ return
+ }
+ (*z).AddrToPos[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).VotersHdr.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "VotersHdr")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).Message.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Message")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = spProver{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "prv":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ (*z).Prover = nil
+ } else {
+ if (*z).Prover == nil {
+ (*z).Prover = new(stateproof.Prover)
+ }
+ bts, err = (*z).Prover.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Prover")
+ return
+ }
+ }
+ case "addr":
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AddrToPos")
+ return
+ }
+ if zb0007 > stateproof.VotersAllocBound {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(stateproof.VotersAllocBound))
+ err = msgp.WrapError(err, "AddrToPos")
+ return
+ }
+ if zb0008 {
+ (*z).AddrToPos = nil
+ } else if (*z).AddrToPos == nil {
+ (*z).AddrToPos = make(map[Address]uint64, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 Address
+ var zb0002 uint64
+ zb0007--
+ bts, err = zb0001.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AddrToPos")
+ return
+ }
+ zb0002, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AddrToPos", zb0001)
+ return
+ }
+ (*z).AddrToPos[zb0001] = zb0002
+ }
+ case "hdr":
+ bts, err = (*z).VotersHdr.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "VotersHdr")
+ return
+ }
+ case "msg":
+ bts, err = (*z).Message.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Message")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *spProver) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*spProver)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *spProver) Msgsize() (s int) {
+ s = 1 + 4
+ if (*z).Prover == nil {
+ s += msgp.NilSize
+ } else {
+ s += (*z).Prover.Msgsize()
+ }
+ s += 5 + msgp.MapHeaderSize
+ if (*z).AddrToPos != nil {
+ for zb0001, zb0002 := range (*z).AddrToPos {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + zb0001.Msgsize() + msgp.Uint64Size
+ }
+ }
+ s += 4 + (*z).VotersHdr.Msgsize() + 4 + (*z).Message.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *spProver) MsgIsZero() bool {
+ return ((*z).Prover == nil) && (len((*z).AddrToPos) == 0) && ((*z).VotersHdr.MsgIsZero()) && ((*z).Message.MsgIsZero())
+}
diff --git a/stateproof/msgp_gen_test.go b/stateproof/msgp_gen_test.go
index ed1bd8206..0a2c02530 100644
--- a/stateproof/msgp_gen_test.go
+++ b/stateproof/msgp_gen_test.go
@@ -73,3 +73,63 @@ func BenchmarkUnmarshalsigFromAddr(b *testing.B) {
}
}
}
+
+func TestMarshalUnmarshalspProver(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := spProver{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingspProver(t *testing.T) {
+ protocol.RunEncodingTest(t, &spProver{})
+}
+
+func BenchmarkMarshalMsgspProver(b *testing.B) {
+ v := spProver{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgspProver(b *testing.B) {
+ v := spProver{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalspProver(b *testing.B) {
+ v := spProver{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/stateproof/signer.go b/stateproof/signer.go
index 5c762720a..99c715fb9 100644
--- a/stateproof/signer.go
+++ b/stateproof/signer.go
@@ -17,13 +17,16 @@
package stateproof
import (
+ "context"
+ "database/sql"
+ "errors"
"time"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/protocol"
)
@@ -43,14 +46,7 @@ func (spw *Worker) signer(latest basics.Round) {
for { // Start signing StateProofs from nextRnd onwards
select {
case <-spw.ledger.Wait(nextRnd):
- hdr, err := spw.ledger.BlockHdr(nextRnd)
- if err != nil {
- spw.log.Warnf("spw.signer(): BlockHdr(next %d): %v", nextRnd, err)
- time.Sleep(1 * time.Second)
- nextRnd = spw.nextStateProofRound(spw.ledger.Latest())
- continue
- }
- spw.signStateProof(hdr)
+ spw.signStateProof(nextRnd)
spw.invokeBuilder(nextRnd)
nextRnd++
@@ -84,94 +80,114 @@ func (spw *Worker) nextStateProofRound(latest basics.Round) basics.Round {
return nextrnd
}
-func (spw *Worker) signStateProof(hdr bookkeeping.BlockHeader) {
- proto := config.Consensus[hdr.CurrentProtocol]
+func (spw *Worker) signStateProof(round basics.Round) {
+ proto, err := spw.getProto(round)
+ if err != nil {
+ spw.log.Warnf("spw.signStateProof(%d): getProto: %v", round, err)
+ return
+ }
+
if proto.StateProofInterval == 0 {
return
}
// Only sign blocks that are a multiple of StateProofInterval.
- if hdr.Round%basics.Round(proto.StateProofInterval) != 0 {
+ if round%basics.Round(proto.StateProofInterval) != 0 {
return
}
- keys := spw.accts.StateProofKeys(hdr.Round)
+ keys := spw.accts.StateProofKeys(round)
if len(keys) == 0 {
// No keys, nothing to do.
return
}
- // votersRound is the round containing the merkle root commitment
- // for the voters that are going to sign this block.
- votersRound := hdr.Round.SubSaturate(basics.Round(proto.StateProofInterval))
- votersHdr, err := spw.ledger.BlockHdr(votersRound)
+ stateProofMessage, err := spw.getStateProofMessage(round)
if err != nil {
- spw.log.Warnf("spw.signBlock(%d): BlockHdr(%d): %v", hdr.Round, votersRound, err)
+ spw.log.Warnf("spw.signStateProof(%d): getStateProofMessage: %v", round, err)
return
}
- if votersHdr.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment.IsEmpty() {
- // No voter commitment, perhaps because state proofs were
- // just enabled.
- return
+ spw.signStateProofMessage(&stateProofMessage, round, keys)
+}
+
+func (spw *Worker) getProto(round basics.Round) (*config.ConsensusParams, error) {
+ protoHdr, err := spw.ledger.BlockHdr(round)
+ if err != nil {
+ // IMPORTANT: This doesn't support modification of the state proof interval at the moment. Actually supporting
+ // it will probably require using (and slightly modifying) the stateProofVerificationTracker.
+ latestRound := spw.ledger.Latest()
+ protoHdr, err = spw.ledger.BlockHdr(latestRound)
+ if err != nil {
+ return nil, err
+ }
}
- sigs := make([]sigFromAddr, 0, len(keys))
- ids := make([]account.ParticipationID, 0, len(keys))
- usedSigners := make([]*merklesignature.Signer, 0, len(keys))
+ proto := config.Consensus[protoHdr.CurrentProtocol]
+ return &proto, nil
+}
- stateproofMessage, err := GenerateStateProofMessage(spw.ledger, uint64(votersHdr.Round), hdr)
- if err != nil {
- spw.log.Warnf("spw.signBlock(%d): GenerateStateProofMessage: %v", hdr.Round, err)
- return
+func (spw *Worker) getStateProofMessage(round basics.Round) (stateproofmsg.Message, error) {
+ var msg stateproofmsg.Message
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ msg, err = getMessage(tx, round)
+ return err
+ })
+ if err == nil {
+ return msg, nil
}
- hashedStateproofMessage := stateproofMessage.Hash()
+ if !errors.Is(err, sql.ErrNoRows) {
+ spw.log.Errorf("getStateProofMessage(%d): error while fetching prover from DB: %v", round, err)
+ }
+
+ return GenerateStateProofMessage(spw.ledger, round)
+}
+
+func (spw *Worker) signStateProofMessage(message *stateproofmsg.Message, round basics.Round, keys []account.StateProofSecretsForRound) {
+ hashedStateproofMessage := message.Hash()
+
+ sigs := make([]sigFromAddr, 0, len(keys))
for _, key := range keys {
- if key.FirstValid > hdr.Round || hdr.Round > key.LastValid {
+ if key.FirstValid > round || round > key.LastValid {
continue
}
if key.StateProofSecrets == nil {
- spw.log.Warnf("spw.signBlock(%d): empty state proof secrets for round", hdr.Round)
+ spw.log.Warnf("spw.signStateProofMessage(%d): empty state proof secrets for round", round)
+ continue
+ }
+
+ var exists bool
+ err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ exists, err = sigExistsInDB(tx, round, key.Account)
+ return err
+ })
+ if err != nil {
+ spw.log.Warnf("spw.signStateProofMessage(%d): couldn't figure if sig exists in DB: %v", round, err)
+ } else if exists {
continue
}
sig, err := key.StateProofSecrets.SignBytes(hashedStateproofMessage[:])
if err != nil {
- spw.log.Warnf("spw.signBlock(%d): StateProofSecrets.Sign: %v", hdr.Round, err)
+ spw.log.Warnf("spw.signStateProofMessage(%d): StateProofSecrets.Sign: %v", round, err)
continue
}
sigs = append(sigs, sigFromAddr{
SignerAddress: key.Account,
- Round: hdr.Round,
+ Round: round,
Sig: sig,
})
- ids = append(ids, key.ParticipationID)
- usedSigners = append(usedSigners, key.StateProofSecrets)
}
// Any error in handleSig indicates the signature wasn't stored on disk, thus we cannot delete the key.
- for i, sfa := range sigs {
+ for _, sfa := range sigs {
if _, err := spw.handleSig(sfa, nil); err != nil {
- spw.log.Warnf("spw.signBlock(%d): handleSig: %v", hdr.Round, err)
- continue
- }
-
- spw.log.Infof("spw.signBlock(%d): sp message was signed with address %v", hdr.Round, sfa.SignerAddress)
- firstRoundInKeyLifetime, err := usedSigners[i].FirstRoundInKeyLifetime() // Calculate first round of the key in order to delete all previous keys (and keep the current one for now)
- if err != nil {
- spw.log.Warnf("spw.signBlock(%d): Signer.FirstRoundInKeyLifetime: %v", hdr.Round, err)
+ spw.log.Warnf("spw.signStateProofMessage(%d): handleSig: %v", round, err)
continue
}
- if firstRoundInKeyLifetime == 0 {
- continue // No previous keys to delete (also underflows when subtracting 1)
- }
-
- // Safe to delete key for sfa.Round because the signature is now stored in the disk.
- if err := spw.accts.DeleteStateProofKey(ids[i], basics.Round(firstRoundInKeyLifetime-1)); err != nil { // Subtract 1 to delete all keys up to this one
- spw.log.Warnf("spw.signBlock(%d): DeleteStateProofKey: %v", hdr.Round, err)
- }
+ spw.log.Infof("spw.signStateProofMessage(%d): sp message was signed with address %v", round, sfa.SignerAddress)
}
}
diff --git a/stateproof/stateproofMessageGenerator.go b/stateproof/stateproofMessageGenerator.go
index 92befc631..3b5008f2a 100644
--- a/stateproof/stateproofMessageGenerator.go
+++ b/stateproof/stateproofMessageGenerator.go
@@ -51,8 +51,14 @@ func (b lightBlockHeaders) Marshal(pos uint64) (crypto.Hashable, error) {
// GenerateStateProofMessage returns a stateproof message that contains all the necessary data for proving on Algorand's state.
// In addition, it also includes the trusted data for the next stateproof verification
-func GenerateStateProofMessage(l BlockHeaderFetcher, votersRound uint64, latestRoundHeader bookkeeping.BlockHeader) (stateproofmsg.Message, error) {
+func GenerateStateProofMessage(l BlockHeaderFetcher, round basics.Round) (stateproofmsg.Message, error) {
+ latestRoundHeader, err := l.BlockHdr(round)
+ if err != nil {
+ return stateproofmsg.Message{}, err
+ }
+
proto := config.Consensus[latestRoundHeader.CurrentProtocol]
+ votersRound := uint64(round.SubSaturate(basics.Round(proto.StateProofInterval)))
commitment, err := createHeaderCommitment(l, &proto, &latestRoundHeader)
if err != nil {
return stateproofmsg.Message{}, err
diff --git a/stateproof/stateproofMessageGenerator_test.go b/stateproof/stateproofMessageGenerator_test.go
index 26c47ec8a..a990143b0 100644
--- a/stateproof/stateproofMessageGenerator_test.go
+++ b/stateproof/stateproofMessageGenerator_test.go
@@ -17,14 +17,10 @@
package stateproof
import (
- "context"
+ "github.com/stretchr/testify/require"
"testing"
"time"
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-deadlock"
-
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklearray"
@@ -35,146 +31,10 @@ import (
"github.com/algorand/go-algorand/data/stateproofmsg"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
-type workerForStateProofMessageTests struct {
- w *testWorkerStubs
-}
-
-func (s *workerForStateProofMessageTests) StateProofKeys(round basics.Round) []account.StateProofSecretsForRound {
- return s.w.StateProofKeys(round)
-}
-
-func (s *workerForStateProofMessageTests) DeleteStateProofKey(id account.ParticipationID, round basics.Round) error {
- return s.w.DeleteStateProofKey(id, round)
-}
-
-func (s *workerForStateProofMessageTests) Latest() basics.Round {
- return s.w.Latest()
-}
-
-func (s *workerForStateProofMessageTests) Wait(round basics.Round) chan struct{} {
- return s.w.Wait(round)
-}
-
-func (s *workerForStateProofMessageTests) GenesisHash() crypto.Digest {
- return s.w.GenesisHash()
-}
-
-func (s *workerForStateProofMessageTests) BlockHdr(round basics.Round) (bookkeeping.BlockHeader, error) {
- s.w.mu.Lock()
- defer s.w.mu.Unlock()
-
- element, ok := s.w.blocks[round]
- if !ok {
- return bookkeeping.BlockHeader{}, ledgercore.ErrNoEntry{Round: round}
- }
- return element, nil
-}
-
-func (s *workerForStateProofMessageTests) VotersForStateProof(round basics.Round) (*ledgercore.VotersForRound, error) {
- voters := &ledgercore.VotersForRound{
- Proto: config.Consensus[protocol.ConsensusCurrentVersion],
- AddrToPos: make(map[basics.Address]uint64),
- }
-
- wt := uint64(0)
- for i, k := range s.w.keysForVoters {
- partWe := uint64((len(s.w.keysForVoters) + int(round) - i) * 10000)
- voters.AddrToPos[k.Parent] = uint64(i)
- voters.Participants = append(voters.Participants, basics.Participant{
- PK: *k.StateProofSecrets.GetVerifier(),
- Weight: partWe,
- })
- wt += partWe
- }
-
- tree, err := merklearray.BuildVectorCommitmentTree(voters.Participants, crypto.HashFactory{HashType: stateproof.HashType})
- if err != nil {
- return nil, err
- }
-
- voters.Tree = tree
- voters.TotalWeight = basics.MicroAlgos{Raw: wt}
- return voters, nil
-}
-
-func (s *workerForStateProofMessageTests) Broadcast(ctx context.Context, tag protocol.Tag, bytes []byte, b bool, peer network.Peer) error {
- return s.w.Broadcast(ctx, tag, bytes, b, peer)
-}
-
-func (s *workerForStateProofMessageTests) RegisterHandlers(handlers []network.TaggedMessageHandler) {
- s.w.RegisterHandlers(handlers)
-}
-
-func (s *workerForStateProofMessageTests) BroadcastInternalSignedTxGroup(txns []transactions.SignedTxn) error {
- return s.w.BroadcastInternalSignedTxGroup(txns)
-}
-
-func (s *workerForStateProofMessageTests) addBlockWithStateProofHeaders(ccNextRound basics.Round) {
-
- s.w.latest++
-
- hdr := bookkeeping.BlockHeader{}
- hdr.Round = s.w.latest
- hdr.CurrentProtocol = protocol.ConsensusCurrentVersion
-
- var ccBasic = bookkeeping.StateProofTrackingData{
- StateProofVotersCommitment: make([]byte, stateproof.HashSize),
- StateProofOnlineTotalWeight: basics.MicroAlgos{},
- StateProofNextRound: 0,
- }
-
- if uint64(hdr.Round)%config.Consensus[hdr.CurrentProtocol].StateProofInterval == 0 {
- voters, _ := s.VotersForStateProof(hdr.Round.SubSaturate(basics.Round(config.Consensus[hdr.CurrentProtocol].StateProofVotersLookback)))
- ccBasic.StateProofVotersCommitment = voters.Tree.Root()
- ccBasic.StateProofOnlineTotalWeight = voters.TotalWeight
-
- }
-
- ccBasic.StateProofNextRound = ccNextRound
- hdr.StateProofTracking = map[protocol.StateProofType]bookkeeping.StateProofTrackingData{
- protocol.StateProofBasic: ccBasic,
- }
-
- s.w.blocks[s.w.latest] = hdr
- if s.w.waiters[s.w.latest] != nil {
- close(s.w.waiters[s.w.latest])
- }
-}
-
-func newWorkerForStateProofMessageStubs(keys []account.Participation, totalWeight int) *workerForStateProofMessageTests {
- s := &testWorkerStubs{
- t: nil,
- mu: deadlock.Mutex{},
- latest: 0,
- waiters: make(map[basics.Round]chan struct{}),
- waitersCount: make(map[basics.Round]int),
- blocks: make(map[basics.Round]bookkeeping.BlockHeader),
- keys: keys,
- keysForVoters: keys,
- sigmsg: make(chan []byte, 1024),
- txmsg: make(chan transactions.SignedTxn, 1024),
- totalWeight: totalWeight,
- deletedStateProofKeys: map[account.ParticipationID]basics.Round{},
- }
- sm := workerForStateProofMessageTests{w: s}
- return &sm
-}
-
-func (s *workerForStateProofMessageTests) advanceLatest(delta uint64) {
- s.w.mu.Lock()
- defer s.w.mu.Unlock()
-
- for r := uint64(0); r < delta; r++ {
- s.addBlockWithStateProofHeaders(s.w.blocks[s.w.latest].StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
- }
-}
-
func TestStateProofMessage(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
@@ -188,51 +48,41 @@ func TestStateProofMessage(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerForStateProofMessageStubs(keys, len(keys))
- dbs, _ := dbOpenTest(t, true)
- w := NewWorker(dbs.Wdb, logging.TestingLog(t), s, s, s, s)
-
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.w.latest--
- s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
-
+ s := newWorkerStubsWithChannel(t, keys, len(keys))
+ s.sigmsg = nil
+ w := newTestWorker(t, s)
w.Start()
- defer w.Shutdown()
-
- s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
+ defer w.Stop()
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceRoundsWithoutStateProof(t, 1)
var lastMessage stateproofmsg.Message
+ for i := 0; i < 5; i++ {
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval-1)
- for iter := uint64(0); iter < 5; iter++ {
- s.advanceLatest(proto.StateProofInterval)
-
+ var tx transactions.SignedTxn
+ // there may be several state proof txns; we extract them
for {
- tx, err := s.w.waitOnTxnWithTimeout(time.Second * 5)
+ var err error
+ tx, err = s.waitOnTxnWithTimeout(time.Second * 5)
a.NoError(err)
-
- a.Equal(tx.Txn.Type, protocol.StateProofTx)
-
- lastAttestedRound := basics.Round(tx.Txn.Message.LastAttestedRound)
- if lastAttestedRound < basics.Round(iter+2)*basics.Round(proto.StateProofInterval) {
- continue
+ if lastMessage.LastAttestedRound == 0 || lastMessage.LastAttestedRound < tx.Txn.Message.LastAttestedRound {
+ break
}
- a.Equal(lastAttestedRound, basics.Round(iter+2)*basics.Round(proto.StateProofInterval))
- a.Equal(tx.Txn.Message.FirstAttestedRound, (iter+1)*proto.StateProofInterval+1)
-
- verifySha256BlockHeadersCommitments(a, tx.Txn.Message, s.w.blocks)
-
- if !lastMessage.MsgIsZero() {
- verifier := stateproof.MkVerifierWithLnProvenWeight(lastMessage.VotersCommitment, lastMessage.LnProvenWeight, proto.StateProofStrengthTarget)
-
- err := verifier.Verify(uint64(lastAttestedRound), tx.Txn.Message.Hash(), &tx.Txn.StateProof)
- a.NoError(err)
+ }
- }
+ verifySha256BlockHeadersCommitments(a, tx.Txn.Message, s.blocks)
+ if !lastMessage.MsgIsZero() {
+ verifier := stateproof.MkVerifierWithLnProvenWeight(lastMessage.VotersCommitment, lastMessage.LnProvenWeight, proto.StateProofStrengthTarget)
- lastMessage = tx.Txn.Message
- break
+ err := verifier.Verify(tx.Txn.Message.LastAttestedRound, tx.Txn.Message.Hash(), &tx.Txn.StateProof)
+ a.NoError(err)
}
+ // since a state proof txn was created, we update the header with the next state proof round
+ // i.e. the network has accepted the state proof.
+ s.addBlock(basics.Round(tx.Txn.Message.LastAttestedRound + proto.StateProofInterval))
+ lastMessage = tx.Txn.Message
}
}
@@ -262,12 +112,11 @@ func TestGenerateStateProofMessageForSmallRound(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerForStateProofMessageStubs(keys[:], len(keys))
+ s := newWorkerStubAtGenesis(t, keys[:], len(keys))
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.w.latest--
- s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
+ s.addBlock(2 * basics.Round(proto.StateProofInterval))
- _, err := GenerateStateProofMessage(s, 240, s.w.blocks[s.w.latest])
+ _, err := GenerateStateProofMessage(s, s.latest)
a.ErrorIs(err, errInvalidParams)
}
@@ -284,18 +133,19 @@ func TestMessageLnApproxError(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerForStateProofMessageStubs(keys[:], len(keys))
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.w.latest--
- s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
+ s := newWorkerStubs(t, keys[:], len(keys))
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Stop()
- s.advanceLatest(2*proto.StateProofInterval + proto.StateProofInterval/2)
- tracking := s.w.blocks[512].StateProofTracking[protocol.StateProofBasic]
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval)
+ tracking := s.blocks[512].StateProofTracking[protocol.StateProofBasic]
tracking.StateProofOnlineTotalWeight = basics.MicroAlgos{}
newtracking := tracking
- s.w.blocks[512].StateProofTracking[protocol.StateProofBasic] = newtracking
+ s.blocks[512].StateProofTracking[protocol.StateProofBasic] = newtracking
- _, err := GenerateStateProofMessage(s, 256, s.w.blocks[512])
+ _, err := GenerateStateProofMessage(s, basics.Round(2*proto.StateProofInterval))
a.ErrorIs(err, stateproof.ErrIllegalInputForLnApprox)
}
@@ -312,16 +162,17 @@ func TestMessageMissingHeaderOnInterval(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerForStateProofMessageStubs(keys[:], len(keys))
- s.w.latest--
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
+ s := newWorkerStubs(t, keys[:], len(keys))
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Stop()
- s.advanceLatest(2*proto.StateProofInterval + proto.StateProofInterval/2)
- delete(s.w.blocks, 510)
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval)
+ delete(s.blocks, 510)
- _, err := GenerateStateProofMessage(s, 256, s.w.blocks[512])
- a.ErrorIs(err, ledgercore.ErrNoEntry{Round: 510})
+ _, err := GenerateStateProofMessage(s, basics.Round(2*proto.StateProofInterval))
+ a.ErrorIs(err, ledgercore.ErrNoEntry{Round: 510, Latest: s.latest, Committed: s.latest})
}
func TestGenerateBlockProof(t *testing.T) {
@@ -337,46 +188,55 @@ func TestGenerateBlockProof(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerForStateProofMessageStubs(keys, len(keys))
- dbs, _ := dbOpenTest(t, true)
- w := NewWorker(dbs.Wdb, logging.TestingLog(t), s, s, s, s)
+ s := newWorkerStubsWithChannel(t, keys, len(keys))
+ s.sigmsg = nil
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Stop()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.w.latest--
- s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
+ s.advanceRoundsWithoutStateProof(t, 1)
+ var lastAttestedRound basics.Round
+ for i := 0; i < 5; i++ {
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval-1)
- w.Start()
- defer w.Shutdown()
+ var tx transactions.SignedTxn
+ // there may be several state proof txns; we extract them
+ for {
+ var err error
+ tx, err = s.waitOnTxnWithTimeout(time.Second * 5)
+ a.NoError(err)
+ if lastAttestedRound == 0 || lastAttestedRound < basics.Round(tx.Txn.Message.LastAttestedRound) {
+ break
+ }
- s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
+ }
+ headers, err := FetchLightHeaders(s, proto.StateProofInterval, basics.Round(tx.Txn.Message.LastAttestedRound))
+ a.NoError(err)
+ a.Equal(proto.StateProofInterval, uint64(len(headers)))
- for iter := uint64(0); iter < 5; iter++ {
- s.advanceLatest(proto.StateProofInterval)
+ verifyLightBlockHeaderProof(&tx, &proto, headers, a)
- tx := <-s.w.txmsg
- // we have a new tx. now attempt to fetch a block proof.
- firstAttestedRound := tx.Txn.Message.FirstAttestedRound
- lastAttestedRound := tx.Txn.Message.LastAttestedRound
+ s.addBlock(basics.Round(tx.Txn.Message.LastAttestedRound + proto.StateProofInterval))
+ lastAttestedRound = basics.Round(tx.Txn.Message.LastAttestedRound)
+ }
+}
- headers, err := FetchLightHeaders(s, proto.StateProofInterval, basics.Round(lastAttestedRound))
+func verifyLightBlockHeaderProof(tx *transactions.SignedTxn, proto *config.ConsensusParams, headers []bookkeeping.LightBlockHeader, a *require.Assertions) {
+ // attempt to get a block proof for every block in the interval
+ for j := tx.Txn.Message.FirstAttestedRound; j < tx.Txn.Message.LastAttestedRound; j++ {
+ headerIndex := j - tx.Txn.Message.FirstAttestedRound
+ proof, err := GenerateProofOfLightBlockHeaders(proto.StateProofInterval, headers, headerIndex)
a.NoError(err)
- a.Equal(proto.StateProofInterval, uint64(len(headers)))
-
- // attempting to get block proof for every block in the interval
- for i := firstAttestedRound; i < lastAttestedRound; i++ {
- headerIndex := i - firstAttestedRound
- proof, err := GenerateProofOfLightBlockHeaders(proto.StateProofInterval, headers, headerIndex)
- a.NoError(err)
- a.NotNil(proof)
+ a.NotNil(proof)
- lightheader := headers[headerIndex]
- err = merklearray.VerifyVectorCommitment(
- tx.Txn.Message.BlockHeadersCommitment,
- map[uint64]crypto.Hashable{headerIndex: &lightheader},
- proof.ToProof())
+ lightheader := headers[headerIndex]
+ err = merklearray.VerifyVectorCommitment(
+ tx.Txn.Message.BlockHeadersCommitment,
+ map[uint64]crypto.Hashable{headerIndex: &lightheader},
+ proof.ToProof())
- a.NoError(err)
- }
+ a.NoError(err)
}
}
@@ -385,7 +245,7 @@ func TestGenerateBlockProofOnSmallArray(t *testing.T) {
a := require.New(t)
var keys []account.Participation
- for i := 0; i < 10; i++ {
+ for i := 0; i < 2; i++ {
var parent basics.Address
crypto.RandBytes(parent[:])
p := newPartKey(t, parent)
@@ -393,13 +253,13 @@ func TestGenerateBlockProofOnSmallArray(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerForStateProofMessageStubs(keys, len(keys))
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
-
- s.w.latest--
- s.addBlockWithStateProofHeaders(2 * basics.Round(proto.StateProofInterval))
+ s := newWorkerStubs(t, keys[:], len(keys))
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Stop()
- s.advanceLatest(2 * proto.StateProofInterval)
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval)
headers, err := FetchLightHeaders(s, proto.StateProofInterval, basics.Round(2*proto.StateProofInterval))
a.NoError(err)
headers = headers[1:]
diff --git a/stateproof/verify/stateproof.go b/stateproof/verify/stateproof.go
index fa3641c08..3caaabac8 100644
--- a/stateproof/verify/stateproof.go
+++ b/stateproof/verify/stateproof.go
@@ -25,17 +25,16 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/stateproofmsg"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
var (
- errStateProofCrypto = errors.New("state proof crypto error")
- errStateProofParamCreation = errors.New("state proof param creation error")
- errStateProofNotEnabled = errors.New("state proofs are not enabled")
- errNotAtRightMultiple = errors.New("state proof is not in a valid round multiple")
- errInvalidVotersRound = errors.New("invalid voters round")
- errInsufficientWeight = errors.New("insufficient state proof weight")
+ errStateProofCrypto = errors.New("state proof crypto error")
+ errStateProofNotEnabled = errors.New("state proofs are not enabled")
+ errNotAtRightMultiple = errors.New("state proof is not in a valid round multiple")
+ errInsufficientWeight = errors.New("insufficient state proof weight")
)
// AcceptableStateProofWeight computes the acceptable signed weight
@@ -51,11 +50,16 @@ func AcceptableStateProofWeight(votersHdr *bookkeeping.BlockHeader, firstValid b
latestRoundInProof := votersHdr.Round + basics.Round(proto.StateProofInterval)
total := votersHdr.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight
+ return calculateAcceptableStateProofWeight(total, &proto, latestRoundInProof, firstValid, logger)
+}
+
+func calculateAcceptableStateProofWeight(total basics.MicroAlgos, proto *config.ConsensusParams, lastAttestedRound basics.Round, firstValid basics.Round, logger logging.Logger) uint64 {
+ halfPeriodForInterval := proto.StateProofInterval / 2
// The acceptable weight depends on the elapsed time (in rounds)
// from the block we are trying to construct a proof for.
// Start by subtracting the latest round number in the state proof interval.
// If that round hasn't even passed yet, require 100% votes in proof.
- offset := firstValid.SubSaturate(latestRoundInProof)
+ offset := firstValid.SubSaturate(lastAttestedRound)
if offset == 0 {
return total.ToUint64()
}
@@ -63,7 +67,7 @@ func AcceptableStateProofWeight(votersHdr *bookkeeping.BlockHeader, firstValid b
// During the first proto.StateProofInterval/2 blocks, the
// signatures are still being broadcast, so, continue requiring
// 100% votes.
- offset = offset.SubSaturate(basics.Round(proto.StateProofInterval / 2))
+ offset = offset.SubSaturate(basics.Round(halfPeriodForInterval))
if offset == 0 {
return total.ToUint64()
}
@@ -75,35 +79,35 @@ func AcceptableStateProofWeight(votersHdr *bookkeeping.BlockHeader, firstValid b
provenWeight, overflowed := basics.Muldiv(total.ToUint64(), uint64(proto.StateProofWeightThreshold), 1<<32)
if overflowed || provenWeight > total.ToUint64() {
// Shouldn't happen, but a safe fallback is to accept a larger proof.
- logger.Warnf("AcceptableStateProofWeight(%d, %d, %d, %d) overflow provenWeight",
- total, proto.StateProofInterval, latestRoundInProof, firstValid)
+ logger.Warnf("calculateAcceptableStateProofWeight(%d, %d, %d, %d) overflow provenWeight",
+ total, proto.StateProofInterval, lastAttestedRound, firstValid)
return 0
}
- if offset >= basics.Round(proto.StateProofInterval/2) {
+ if offset >= basics.Round(halfPeriodForInterval) {
return provenWeight
}
- scaledWeight, overflowed := basics.Muldiv(total.ToUint64()-provenWeight, proto.StateProofInterval/2-uint64(offset), proto.StateProofInterval/2)
+ scaledWeight, overflowed := basics.Muldiv(total.ToUint64()-provenWeight, halfPeriodForInterval-uint64(offset), halfPeriodForInterval)
if overflowed {
// Shouldn't happen, but a safe fallback is to accept a larger state proof.
- logger.Warnf("AcceptableStateProofWeight(%d, %d, %d, %d) overflow scaledWeight",
- total, proto.StateProofInterval, latestRoundInProof, firstValid)
+ logger.Warnf("calculateAcceptableStateProofWeight(%d, %d, %d, %d) overflow scaledWeight",
+ total, proto.StateProofInterval, lastAttestedRound, firstValid)
return 0
}
w, overflowed := basics.OAdd(provenWeight, scaledWeight)
if overflowed {
// Shouldn't happen, but a safe fallback is to accept a larger state proof.
- logger.Warnf("AcceptableStateProofWeight(%d, %d, %d, %d) overflow provenWeight (%d) + scaledWeight (%d)",
- total, proto.StateProofInterval, latestRoundInProof, firstValid, provenWeight, scaledWeight)
+ logger.Warnf("calculateAcceptableStateProofWeight(%d, %d, %d, %d) overflow provenWeight (%d) + scaledWeight (%d)",
+ total, proto.StateProofInterval, lastAttestedRound, firstValid, provenWeight, scaledWeight)
return 0
}
return w
}
-// GetProvenWeight computes the parameters for building or verifying
+// GetProvenWeight computes the parameters for proving or verifying
// a state proof for the interval (votersHdr, latestRoundInProofHdr], using voters from block votersHdr.
func GetProvenWeight(votersHdr *bookkeeping.BlockHeader, latestRoundInProofHdr *bookkeeping.BlockHeader) (uint64, error) {
proto := config.Consensus[votersHdr.CurrentProtocol]
@@ -136,42 +140,38 @@ func GetProvenWeight(votersHdr *bookkeeping.BlockHeader, latestRoundInProofHdr *
}
// ValidateStateProof checks that a state proof is valid.
-func ValidateStateProof(latestRoundInIntervalHdr *bookkeeping.BlockHeader, stateProof *stateproof.StateProof, votersHdr *bookkeeping.BlockHeader, atRound basics.Round, msg *stateproofmsg.Message) error {
- proto := config.Consensus[latestRoundInIntervalHdr.CurrentProtocol]
+func ValidateStateProof(verificationContext *ledgercore.StateProofVerificationContext, stateProof *stateproof.StateProof, atRound basics.Round, msg *stateproofmsg.Message) error {
+ proto := config.Consensus[verificationContext.Version]
if proto.StateProofInterval == 0 {
return fmt.Errorf("rounds = %d: %w", proto.StateProofInterval, errStateProofNotEnabled)
}
- if latestRoundInIntervalHdr.Round%basics.Round(proto.StateProofInterval) != 0 {
- return fmt.Errorf("state proof at %d for non-multiple of %d: %w", latestRoundInIntervalHdr.Round, proto.StateProofInterval, errNotAtRightMultiple)
- }
-
- votersRound := latestRoundInIntervalHdr.Round.SubSaturate(basics.Round(proto.StateProofInterval))
- if votersRound != votersHdr.Round {
- return fmt.Errorf("new state proof is for %d (voters %d), but votersHdr from %d: %w",
- latestRoundInIntervalHdr.Round, votersRound, votersHdr.Round, errInvalidVotersRound)
+ if verificationContext.LastAttestedRound%basics.Round(proto.StateProofInterval) != 0 {
+ return fmt.Errorf("state proof at %d for non-multiple of %d: %w", verificationContext.LastAttestedRound, proto.StateProofInterval, errNotAtRightMultiple)
}
- acceptableWeight := AcceptableStateProofWeight(votersHdr, atRound, logging.Base())
+ acceptableWeight := calculateAcceptableStateProofWeight(verificationContext.OnlineTotalWeight, &proto, verificationContext.LastAttestedRound, atRound, logging.Base())
if stateProof.SignedWeight < acceptableWeight {
return fmt.Errorf("insufficient weight at round %d: %d < %d: %w",
atRound, stateProof.SignedWeight, acceptableWeight, errInsufficientWeight)
}
- provenWeight, err := GetProvenWeight(votersHdr, latestRoundInIntervalHdr)
- if err != nil {
- return fmt.Errorf("%v: %w", err, errStateProofParamCreation)
+ provenWeight, overflowed := basics.Muldiv(verificationContext.OnlineTotalWeight.ToUint64(), uint64(proto.StateProofWeightThreshold), 1<<32)
+ if overflowed {
+ return fmt.Errorf("overflow computing provenWeight[%d]: %d * %d / (1<<32)",
+ verificationContext.LastAttestedRound, verificationContext.OnlineTotalWeight.ToUint64(), proto.StateProofWeightThreshold)
+
}
- verifier, err := stateproof.MkVerifier(votersHdr.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment,
+ verifier, err := stateproof.MkVerifier(verificationContext.VotersCommitment,
provenWeight,
- config.Consensus[votersHdr.CurrentProtocol].StateProofStrengthTarget)
+ proto.StateProofStrengthTarget)
if err != nil {
return err
}
- err = verifier.Verify(uint64(latestRoundInIntervalHdr.Round), msg.Hash(), stateProof)
+ err = verifier.Verify(uint64(verificationContext.LastAttestedRound), msg.Hash(), stateProof)
if err != nil {
return fmt.Errorf("%v: %w", err, errStateProofCrypto)
}
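
The extracted calculateAcceptableStateProofWeight preserves the original three-phase rule: full online weight is required until StateProofInterval/2 rounds have elapsed past the last attested round, after which the requirement decays linearly to the proven-weight threshold over another StateProofInterval/2 rounds. A self-contained sketch of that curve with hypothetical numbers (a 30% weight threshold is assumed; overflow checks and basics.Muldiv are omitted):

package main

import "fmt"

// acceptableWeight mirrors the shape of calculateAcceptableStateProofWeight
// for illustration: total online weight, the interval, the last attested
// round, and the transaction's firstValid round. The real code reads the
// threshold from consensus parameters.
func acceptableWeight(total, interval, lastAttested, firstValid uint64) uint64 {
	half := interval / 2
	if firstValid <= lastAttested {
		return total // the attested round hasn't passed yet: require 100%
	}
	offset := firstValid - lastAttested
	if offset <= half {
		return total // signatures are still being broadcast: require 100%
	}
	offset -= half
	provenWeight := total * 30 / 100
	if offset >= half {
		return provenWeight // enough time has passed: the bare threshold suffices
	}
	// linear decay from 100% down to the proven-weight threshold
	return provenWeight + (total-provenWeight)*(half-offset)/half
}

func main() {
	const total, interval, lastAttested = 100, 256, 512
	for _, firstValid := range []uint64{512, 640, 641, 704, 768} {
		fmt.Printf("firstValid=%d -> acceptable=%d\n",
			firstValid, acceptableWeight(total, interval, lastAttested, firstValid))
	}
}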
diff --git a/stateproof/verify/stateproof_test.go b/stateproof/verify/stateproof_test.go
index 0495729dd..b092bad8f 100644
--- a/stateproof/verify/stateproof_test.go
+++ b/stateproof/verify/stateproof_test.go
@@ -17,20 +17,33 @@
package verify
import (
- "testing"
-
- "github.com/stretchr/testify/require"
-
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto/stateproof"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/stateproofmsg"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+ "testing"
)
+func invokeValidateStateProof(latestRoundInIntervalHdr *bookkeeping.BlockHeader,
+ stateProof *stateproof.StateProof,
+ votersHdr *bookkeeping.BlockHeader,
+ atRound basics.Round,
+ msg *stateproofmsg.Message) error {
+ verificationContext := ledgercore.StateProofVerificationContext{
+ LastAttestedRound: latestRoundInIntervalHdr.Round,
+ VotersCommitment: votersHdr.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment,
+ OnlineTotalWeight: votersHdr.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight,
+ Version: votersHdr.CurrentProtocol,
+ }
+ return ValidateStateProof(&verificationContext, stateProof, atRound, msg)
+}
+
func TestValidateStateProof(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -41,55 +54,52 @@ func TestValidateStateProof(t *testing.T) {
msg := &stateproofmsg.Message{BlockHeadersCommitment: []byte("this is an arbitrary message")}
// will definitely fail with nothing set up
- err := ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ err := invokeValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
require.ErrorIs(t, err, errStateProofNotEnabled)
- spHdr.CurrentProtocol = "TestValidateStateProof"
- spHdr.Round = 1
+ votersHdr.CurrentProtocol = "TestValidateStateProof"
proto := config.Consensus[spHdr.CurrentProtocol]
- proto.StateProofInterval = 2
+ proto.StateProofInterval = 256
proto.StateProofStrengthTarget = 256
proto.StateProofWeightThreshold = (1 << 32) * 30 / 100
- config.Consensus[spHdr.CurrentProtocol] = proto
+ config.Consensus[votersHdr.CurrentProtocol] = proto
- err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ spHdr.Round = 1
+ err = invokeValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
require.ErrorIs(t, err, errNotAtRightMultiple)
- spHdr.Round = 4
- votersHdr.Round = 4
- err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
- require.ErrorIs(t, err, errInvalidVotersRound)
-
- votersHdr.Round = 2
- err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
- require.ErrorIs(t, err, errStateProofParamCreation)
-
- votersHdr.CurrentProtocol = spHdr.CurrentProtocol
- err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
- // since proven weight is zero, we cann't create the verifier
+ votersHdr.Round = 256
+ spHdr.Round = votersHdr.Round + basics.Round(proto.StateProofInterval)
+ sp.SignedWeight = 1
+ atRound = 800
+ err = invokeValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
require.ErrorIs(t, err, stateproof.ErrIllegalInputForLnApprox)
votersHdr.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
cc := votersHdr.StateProofTracking[protocol.StateProofBasic]
cc.StateProofOnlineTotalWeight.Raw = 100
votersHdr.StateProofTracking[protocol.StateProofBasic] = cc
- err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
- require.ErrorIs(t, err, errInsufficientWeight)
-
// Require 100% of the weight to be signed in order to accept a stateproof before interval/2 rounds have passed from the latest attested round (optimal case)
sp.SignedWeight = 99 // suboptimal signed weight
- err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ atRound = votersHdr.Round + basics.Round(proto.StateProofInterval)
+ err = invokeValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
require.ErrorIs(t, err, errInsufficientWeight)
+ atRound++
+ err = invokeValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ require.ErrorIs(t, err, errInsufficientWeight)
+
+ // we don't pass the scaled weight
+ sp.SignedWeight = 96
latestRoundInProof := votersHdr.Round + basics.Round(proto.StateProofInterval)
- atRound = latestRoundInProof + basics.Round(proto.StateProofInterval/2)
- err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
+ atRound = latestRoundInProof + basics.Round(proto.StateProofInterval/2) + 5
+ err = invokeValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
require.ErrorIs(t, err, errInsufficientWeight)
- // This suboptimal signed weight should be enough for this round
- atRound++
- err = ValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
- // still err, but a different err case to cover
+ // we now pass the threshold, since the network is willing to accept any state proof whose signedWeight is over the threshold
+ sp.SignedWeight = 30
+ atRound = votersHdr.Round + 2*basics.Round(proto.StateProofInterval)
+ err = invokeValidateStateProof(spHdr, sp, votersHdr, atRound, msg)
require.ErrorIs(t, err, errStateProofCrypto)
// Above cases leave validateStateProof() with 100% coverage.
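
A worked instance of the proven-weight arithmetic behind the final assertion above (a sketch; the real code uses basics.Muldiv with overflow checks):

package main

import "fmt"

func main() {
	// Values from the test above: online total weight 100 and a 30%
	// StateProofWeightThreshold encoded as a fraction of 1<<32.
	total := uint64(100)
	threshold := uint64(1<<32) * 30 / 100 // truncates to 1288490188
	provenWeight := total * threshold / (1 << 32)
	// Prints 29: once a full interval has elapsed, SignedWeight=30 clears the
	// weight check, so validation proceeds to the crypto stage (errStateProofCrypto).
	fmt.Println(provenWeight)
}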
diff --git a/stateproof/worker.go b/stateproof/worker.go
index 857431f04..acc55a5d6 100644
--- a/stateproof/worker.go
+++ b/stateproof/worker.go
@@ -18,29 +18,26 @@ package stateproof
import (
"context"
- "database/sql"
+ "fmt"
+ "os"
+ "path/filepath"
"sync"
"github.com/algorand/go-deadlock"
- "github.com/algorand/go-algorand/crypto/stateproof"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/stateproofmsg"
- "github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
-type builder struct {
- *stateproof.Builder
-
- voters *ledgercore.VotersForRound
- votersHdr bookkeeping.BlockHeader
- message stateproofmsg.Message
-}
+// This is a soft limit on how many provers should be kept in memory; the rest are fetched from the DB.
+// Most of the time only 1 prover should be stored (both in memory and on disk), as this feature
+// is mostly used for recoverability purposes - in case the StateProof chain is stalled.
+// The provers cache is composed of the X earliest provers as well as the latest prover, for a total of X+1 (in case of a stalled chain).
+const proversCacheLength = 5 // must be at least 2 to function properly (earliest stateproof + latest stateproof)
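
// A hypothetical helper (not part of this change) sketching the retention
// policy described above: given the sorted rounds that currently need a
// prover, keep the earliest proversCacheLength-1 of them plus the newest one.
func roundsToCache(sorted []basics.Round) []basics.Round {
	if len(sorted) <= proversCacheLength {
		return sorted
	}
	keep := append([]basics.Round{}, sorted[:proversCacheLength-1]...)
	return append(keep, sorted[len(sorted)-1]) // earliest N-1 plus the latest
}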
// Worker builds state proofs, by broadcasting
// signatures using this node's participation keys, by collecting
@@ -51,53 +48,67 @@ type Worker struct {
// from the network stack.
mu deadlock.Mutex
- db db.Accessor
- log logging.Logger
- accts Accounts
- ledger Ledger
- net Network
- txnSender TransactionSender
+ spDbFileName string
+ db db.Accessor
+ log logging.Logger
+ accts Accounts
+ ledger Ledger
+ net Network
+ txnSender TransactionSender
- // builders is indexed by the round of the block being signed.
- builders map[basics.Round]builder
+ // provers is indexed by the round of the block being signed.
+ provers map[basics.Round]spProver
ctx context.Context
shutdown context.CancelFunc
wg sync.WaitGroup
+ // Mutex for protecting access to the signed field
+ signedMu deadlock.RWMutex
signed basics.Round
signedCh chan struct{}
+
+ lastCleanupRound basics.Round
+
+ // inMemory indicates whether the state proof db should be stored in memory. Used for testing.
+ inMemory bool
}
// NewWorker constructs a new Worker, as used by the node.
-func NewWorker(db db.Accessor, log logging.Logger, accts Accounts, ledger Ledger, net Network, txnSender TransactionSender) *Worker {
- ctx, cancel := context.WithCancel(context.Background())
+func NewWorker(genesisDir string, log logging.Logger, accts Accounts, ledger Ledger, net Network, txnSender TransactionSender) *Worker {
+ // Delete the deprecated database file if it exists. This can be removed in future updates since this file should not exist by then.
+ oldCompactCertPath := filepath.Join(genesisDir, "compactcert.sqlite")
+ os.Remove(oldCompactCertPath)
+
+ stateProofPathname := filepath.Join(genesisDir, config.StateProofFileName)
return &Worker{
- db: db,
- log: log,
- accts: accts,
- ledger: ledger,
- net: net,
- txnSender: txnSender,
- builders: make(map[basics.Round]builder),
- ctx: ctx,
- shutdown: cancel,
- signedCh: make(chan struct{}, 1),
+ spDbFileName: stateProofPathname,
+ log: log,
+ accts: accts,
+ ledger: ledger,
+ net: net,
+ txnSender: txnSender,
+ inMemory: false,
}
}
// Start starts the goroutines for the worker.
func (spw *Worker) Start() {
- err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- return initDB(tx)
- })
+ ctx, cancel := context.WithCancel(context.Background())
+ spw.ctx = ctx
+ spw.shutdown = cancel
+ spw.signedCh = make(chan struct{}, 1)
+
+ err := spw.initDb(spw.inMemory)
if err != nil {
- spw.log.Warnf("spw.Start(): initDB: %v", err)
+ spw.log.Warn(err)
return
}
- spw.initBuilders()
+ spw.initProvers()
+
+ spw.ledger.RegisterVotersCommitListener(spw)
handlers := []network.TaggedMessageHandler{
{Tag: protocol.StateProofSigTag, MessageHandler: network.HandlerFunc(spw.handleSigMessage)},
@@ -113,9 +124,44 @@ func (spw *Worker) Start() {
go spw.builder(latest)
}
-// Shutdown stops any goroutines associated with this worker.
-func (spw *Worker) Shutdown() {
+func (spw *Worker) initDb(inMemory bool) error {
+ stateProofAccess, err := db.MakeAccessor(spw.spDbFileName, false, inMemory)
+ if err != nil {
+ return fmt.Errorf("spw.initDb(): cannot load state proof data: %w", err)
+
+ }
+
+ spw.db = stateProofAccess
+ err = makeStateProofDB(spw.db)
+ if err != nil {
+ return fmt.Errorf("spw.initDb(): makeStateProofDB failed: %w", err)
+ }
+ return nil
+}
+
+// Stop stops any goroutines associated with this worker. It is the caller's responsibility to remove the
+// registered network handlers.
+func (spw *Worker) Stop() {
spw.shutdown()
spw.wg.Wait()
- spw.db.Close()
+
+ spw.ledger.UnregisterVotersCommitListener()
+
+ // we take the lock in case the network handler is currently running handleSig
+ spw.mu.Lock()
+ defer spw.mu.Unlock()
+
+ spw.provers = nil
+ spw.signedCh = nil
+
+ if spw.db.Handle != nil {
+ spw.db.Close()
+ }
}
+
+// SortAddress implements sorting by Address keys for
+// canonical encoding of maps in msgpack format.
+type SortAddress = basics.SortAddress
+
+// Address is required for the msgpack sort binding, since it looks for Address and not basics.Address
+type Address = basics.Address
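
With this refactor the worker owns its database lifecycle: NewWorker only records the file path under the genesis directory, while Start opens (or creates) the DB via initDb, loads the provers, and registers the voters-commit listener; Stop reverses all of it. A minimal wiring sketch (the function, package, and parameter names here are hypothetical; they stand for whatever satisfies the worker's interfaces in a real node):

package node // hypothetical illustration package

import (
	"github.com/algorand/go-algorand/logging"
	"github.com/algorand/go-algorand/stateproof"
)

// runStateProofWorker wires up and starts a state proof worker. Start opens
// the DB at filepath.Join(genesisDir, config.StateProofFileName) and registers
// the voters-commit listener; the returned function is deferred by the caller
// to stop goroutines, unregister the listener, and close the DB.
func runStateProofWorker(genesisDir string, log logging.Logger,
	accts stateproof.Accounts, ledger stateproof.Ledger,
	net stateproof.Network, txnSender stateproof.TransactionSender) func() {
	w := stateproof.NewWorker(genesisDir, log, accts, ledger, net, txnSender)
	w.Start()
	return w.Stop
}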
diff --git a/stateproof/worker_test.go b/stateproof/worker_test.go
index bf1ec2636..68a19dcd2 100644
--- a/stateproof/worker_test.go
+++ b/stateproof/worker_test.go
@@ -20,8 +20,9 @@ import (
"context"
"database/sql"
"encoding/binary"
+ "errors"
"fmt"
- "io"
+ "os"
"strings"
"sync"
"testing"
@@ -49,57 +50,111 @@ import (
)
type testWorkerStubs struct {
- t testing.TB
- mu deadlock.Mutex
- latest basics.Round
- waiters map[basics.Round]chan struct{}
- waitersCount map[basics.Round]int
- blocks map[basics.Round]bookkeeping.BlockHeader
- keys []account.Participation
- keysForVoters []account.Participation
- sigmsg chan []byte
- txmsg chan transactions.SignedTxn
- totalWeight int
- deletedStateProofKeys map[account.ParticipationID]basics.Round
-}
-
-func newWorkerStubs(t testing.TB, keys []account.Participation, totalWeight int) *testWorkerStubs {
+ t testing.TB
+ mu deadlock.Mutex
+ listenerMu deadlock.RWMutex
+ latest basics.Round
+ waiters map[basics.Round]chan struct{}
+ waitersCount map[basics.Round]int
+ blocks map[basics.Round]bookkeeping.BlockHeader
+ keys []account.Participation
+ keysForVoters []account.Participation
+ sigmsg chan []byte
+ txmsg chan transactions.SignedTxn
+ totalWeight int
+ deletedKeysBeforeRoundMap map[account.ParticipationID]basics.Round
+ version protocol.ConsensusVersion
+ commitListener ledgercore.VotersCommitListener
+}
+
+func newWorkerStubs(t *testing.T, keys []account.Participation, totalWeight int) *testWorkerStubs {
+ return newWorkerStubsWithVersion(t, keys, protocol.ConsensusCurrentVersion, totalWeight)
+}
+
+func newWorkerStubsWithChannel(t *testing.T, keys []account.Participation, totalWeight int) *testWorkerStubs {
+ worker := newWorkerStubsWithVersion(t, keys, protocol.ConsensusCurrentVersion, totalWeight)
+ worker.sigmsg = make(chan []byte, 1024*1024)
+ worker.txmsg = make(chan transactions.SignedTxn, 1024)
+ return worker
+}
+
+func newWorkerStubAtGenesis(t *testing.T, keys []account.Participation, totalWeight int) *testWorkerStubs {
s := &testWorkerStubs{
- t: nil,
- mu: deadlock.Mutex{},
- latest: 0,
- waiters: make(map[basics.Round]chan struct{}),
- waitersCount: make(map[basics.Round]int),
- blocks: make(map[basics.Round]bookkeeping.BlockHeader),
- keys: keys,
- keysForVoters: keys,
- sigmsg: make(chan []byte, 1024*1024),
- txmsg: make(chan transactions.SignedTxn, 1024),
- totalWeight: totalWeight,
- deletedStateProofKeys: map[account.ParticipationID]basics.Round{},
+ t: t,
+ mu: deadlock.Mutex{},
+ listenerMu: deadlock.RWMutex{},
+ latest: 0,
+ waiters: make(map[basics.Round]chan struct{}),
+ waitersCount: make(map[basics.Round]int),
+ blocks: make(map[basics.Round]bookkeeping.BlockHeader),
+ keys: keys,
+ keysForVoters: keys,
+ sigmsg: nil,
+ txmsg: nil,
+ totalWeight: totalWeight,
+ deletedKeysBeforeRoundMap: map[account.ParticipationID]basics.Round{},
+ version: protocol.ConsensusCurrentVersion,
}
s.latest--
s.addBlock(2 * basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval))
return s
}
+func newWorkerStubsWithVersion(t *testing.T, keys []account.Participation, version protocol.ConsensusVersion, totalWeight int) *testWorkerStubs {
+ proto := config.Consensus[version]
+ s := &testWorkerStubs{
+ t: t,
+ mu: deadlock.Mutex{},
+ listenerMu: deadlock.RWMutex{},
+ latest: 0,
+ waiters: make(map[basics.Round]chan struct{}),
+ waitersCount: make(map[basics.Round]int),
+ blocks: make(map[basics.Round]bookkeeping.BlockHeader),
+ keys: keys,
+ keysForVoters: keys,
+ sigmsg: nil,
+ txmsg: nil,
+ totalWeight: totalWeight,
+ deletedKeysBeforeRoundMap: map[account.ParticipationID]basics.Round{},
+ version: version,
+ }
+ s.latest--
+ s.addBlock(2 * basics.Round(proto.StateProofInterval))
+ s.advanceRoundsBeforeFirstStateProof(&proto)
+ return s
+}
+
+func (s *testWorkerStubs) notifyPrepareVoterCommit(oldBase, newBase basics.Round) {
+ s.listenerMu.RLock()
+ defer s.listenerMu.RUnlock()
+
+ if s.commitListener == nil {
+ return
+ }
+
+ s.commitListener.OnPrepareVoterCommit(oldBase, newBase, s)
+}
+
func (s *testWorkerStubs) addBlock(spNextRound basics.Round) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
s.latest++
hdr := bookkeeping.BlockHeader{}
hdr.Round = s.latest
- hdr.CurrentProtocol = protocol.ConsensusCurrentVersion
-
+ hdr.CurrentProtocol = s.version
var stateProofBasic = bookkeeping.StateProofTrackingData{
StateProofVotersCommitment: make([]byte, stateproof.HashSize),
StateProofOnlineTotalWeight: basics.MicroAlgos{},
StateProofNextRound: 0,
}
- stateProofBasic.StateProofOnlineTotalWeight.Raw = uint64(s.totalWeight)
- if hdr.Round > 0 {
- // Just so it's not zero, since the signer logic checks for all-zeroes
- stateProofBasic.StateProofVotersCommitment[1] = 0x12
+ spInterval := config.Consensus[s.version].StateProofInterval
+ if spInterval != 0 && (hdr.Round > 0 && uint64(hdr.Round)%spInterval == 0) {
+ vt, _ := s.VotersForStateProof(hdr.Round)
+ stateProofBasic.StateProofOnlineTotalWeight = vt.TotalWeight
+ stateProofBasic.StateProofVotersCommitment = vt.Tree.Root()
}
stateProofBasic.StateProofNextRound = spNextRound
@@ -109,12 +164,16 @@ func (s *testWorkerStubs) addBlock(spNextRound basics.Round) {
s.blocks[s.latest] = hdr
+ s.waitersCount[s.latest] = 0
if s.waiters[s.latest] != nil {
close(s.waiters[s.latest])
+ s.waiters[s.latest] = nil
}
}
func (s *testWorkerStubs) StateProofKeys(rnd basics.Round) (out []account.StateProofSecretsForRound) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
for _, part := range s.keys {
partRecord := account.ParticipationRecord{
ParticipationID: part.ID(),
@@ -131,6 +190,18 @@ func (s *testWorkerStubs) StateProofKeys(rnd basics.Round) (out []account.StateP
Voting: part.Voting,
}
signerInRound := part.StateProofSecrets.GetSigner(uint64(rnd))
+ if signerInRound == nil {
+ continue
+ }
+ KeyInLifeTime, _ := signerInRound.FirstRoundInKeyLifetime()
+
+ // simulate that the key was removed
+ if basics.Round(KeyInLifeTime) < s.deletedKeysBeforeRoundMap[part.ID()] {
+ continue
+ }
+ if part.LastValid < rnd {
+ continue
+ }
partRecordForRound := account.StateProofSecretsForRound{
ParticipationRecord: partRecord,
StateProofSecrets: signerInRound,
@@ -142,14 +213,14 @@ func (s *testWorkerStubs) StateProofKeys(rnd basics.Round) (out []account.StateP
func (s *testWorkerStubs) DeleteStateProofKey(id account.ParticipationID, round basics.Round) error {
s.mu.Lock()
- s.deletedStateProofKeys[id] = round
+ s.deletedKeysBeforeRoundMap[id] = round
s.mu.Unlock()
return nil
}
func (s *testWorkerStubs) GetNumDeletedKeys() int {
s.mu.Lock()
- numDeltedKeys := len(s.deletedStateProofKeys)
+ numDeltedKeys := len(s.deletedKeysBeforeRoundMap)
s.mu.Unlock()
return numDeltedKeys
@@ -171,7 +242,32 @@ func (s *testWorkerStubs) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, err
return hdr, nil
}
+var errEmptyVoters = errors.New("ledger does not have voters")
+
+func (s *testWorkerStubs) RegisterVotersCommitListener(listener ledgercore.VotersCommitListener) {
+ s.listenerMu.Lock()
+ defer s.listenerMu.Unlock()
+ if s.commitListener != nil {
+ panic("re-register commit Listener")
+ }
+ s.commitListener = listener
+}
+
+func (s *testWorkerStubs) UnregisterVotersCommitListener() {
+ s.listenerMu.Lock()
+ defer s.listenerMu.Unlock()
+ s.commitListener = nil
+}
+
func (s *testWorkerStubs) VotersForStateProof(r basics.Round) (*ledgercore.VotersForRound, error) {
+ if r == 0 {
+ return nil, nil
+ }
+
+ if len(s.keysForVoters) == 0 {
+ return nil, errEmptyVoters
+ }
+
voters := &ledgercore.VotersForRound{
Proto: config.Consensus[protocol.ConsensusCurrentVersion],
AddrToPos: make(map[basics.Address]uint64),
@@ -195,6 +291,17 @@ func (s *testWorkerStubs) VotersForStateProof(r basics.Round) (*ledgercore.Voter
return voters, nil
}
+func (s *testWorkerStubs) StateProofVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) {
+ dummyContext := ledgercore.StateProofVerificationContext{
+ LastAttestedRound: stateProofLastAttestedRound,
+ VotersCommitment: crypto.GenericDigest{0x1},
+ OnlineTotalWeight: basics.MicroAlgos{},
+ Version: protocol.ConsensusCurrentVersion,
+ }
+
+ return &dummyContext, nil
+}
+
func (s *testWorkerStubs) GenesisHash() crypto.Digest {
return crypto.Digest{0x01, 0x02, 0x03, 0x04}
}
@@ -208,25 +315,37 @@ func (s *testWorkerStubs) Latest() basics.Round {
func (s *testWorkerStubs) Wait(r basics.Round) chan struct{} {
s.mu.Lock()
defer s.mu.Unlock()
+
if s.waiters[r] == nil {
s.waiters[r] = make(chan struct{})
s.waitersCount[r] = 0
- if r <= s.latest {
- close(s.waiters[r])
- }
}
- s.waitersCount[r] = s.waitersCount[r] + 1
+
+ if r <= s.latest {
+ s.waitersCount[r] = 0
+ close(s.waiters[r])
+ retChan := s.waiters[r]
+ s.waiters[r] = nil
+ return retChan
+ }
+ s.waitersCount[r]++
return s.waiters[r]
}
func (s *testWorkerStubs) Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except network.Peer) error {
require.Equal(s.t, tag, protocol.StateProofSigTag)
+ if s.sigmsg == nil {
+ return nil
+ }
s.sigmsg <- data
return nil
}
func (s *testWorkerStubs) BroadcastInternalSignedTxGroup(tx []transactions.SignedTxn) error {
require.Equal(s.t, len(tx), 1)
+ if s.txmsg == nil {
+ return nil
+ }
s.txmsg <- tx[0]
return nil
}
@@ -234,12 +353,79 @@ func (s *testWorkerStubs) BroadcastInternalSignedTxGroup(tx []transactions.Signe
func (s *testWorkerStubs) RegisterHandlers([]network.TaggedMessageHandler) {
}
-func (s *testWorkerStubs) advanceLatest(delta uint64) {
- s.mu.Lock()
- defer s.mu.Unlock()
+func (s *testWorkerStubs) waitForSignerAndBuilder(t *testing.T) {
+ const maxRetries = 1000000
+ i := 0
+ for {
+ numberOfWaiters := 0
+ s.mu.Lock()
+ for _, v := range s.waitersCount {
+ numberOfWaiters += v
+ }
+ s.mu.Unlock()
+ if numberOfWaiters == 2 {
+ break
+ }
+ if numberOfWaiters > 2 {
+ t.Error("found numberOfWaiters > 2. Might be bug in the test")
+ }
+ if i == maxRetries {
+ t.Error("timeout waiting for builder and signer")
+ }
+ i++
+ time.Sleep(time.Millisecond)
+ }
+}
+
+func (s *testWorkerStubs) advanceRoundsBeforeFirstStateProof(proto *config.ConsensusParams) {
+ if proto.StateProofInterval*2 <= 1 {
+ return
+ }
+
+ for r := uint64(0); r < proto.StateProofInterval*2-1; r++ {
+ s.addBlock(s.blocks[s.latest].StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
+ }
+}
+func (s *testWorkerStubs) advanceRoundsWithoutStateProof(t *testing.T, delta uint64) {
for r := uint64(0); r < delta; r++ {
s.addBlock(s.blocks[s.latest].StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
+ s.waitForSignerAndBuilder(t)
+ }
+}
+
+// used to simulate to the worker that rounds have advanced and state proofs were created.
+func (s *testWorkerStubs) advanceRoundsAndCreateStateProofs(t *testing.T, delta uint64) {
+ for r := uint64(0); r < delta; r++ {
+ s.mu.Lock()
+ interval := basics.Round(config.Consensus[s.blocks[s.latest].CurrentProtocol].StateProofInterval)
+ blk := s.blocks[s.latest]
+ stateProofNextRound := s.blocks[s.latest].StateProofTracking[protocol.StateProofBasic].StateProofNextRound
+ if blk.Round%interval == 0 && stateProofNextRound-interval < blk.Round {
+ stateProofNextRound += interval
+ }
+ s.mu.Unlock()
+ s.addBlock(stateProofNextRound)
+ s.waitForSignerAndBuilder(t)
+ }
+}
+
+func (s *testWorkerStubs) mockCommit(upTo basics.Round) {
+ startRound := upTo
+
+ s.mu.Lock()
+ for round := range s.blocks {
+ if round < startRound {
+ startRound = round
+ }
+ }
+ s.mu.Unlock()
+ s.notifyPrepareVoterCommit(startRound, upTo)
+
+ for round := startRound; round <= upTo; round++ {
+ s.mu.Lock()
+ delete(s.blocks, round)
+ s.mu.Unlock()
}
}
@@ -248,7 +434,7 @@ func (s *testWorkerStubs) waitOnSigWithTimeout(timeout time.Duration) ([]byte, e
case sig := <-s.sigmsg:
return sig, nil
case <-time.After(timeout):
- return nil, fmt.Errorf("timeout waiting on sigmsg")
+ return nil, errors.New("timeout waiting on sigmsg")
}
}
@@ -257,33 +443,123 @@ func (s *testWorkerStubs) waitOnTxnWithTimeout(timeout time.Duration) (transacti
case signedTx := <-s.txmsg:
return signedTx, nil
case <-time.After(timeout):
- return transactions.SignedTxn{}, fmt.Errorf("timeout waiting on sigmsg")
+ return transactions.SignedTxn{}, errors.New("timeout waiting on stateproof txn")
}
}
-func newTestWorkerDB(t testing.TB, s *testWorkerStubs, dba db.Accessor) *Worker {
- return NewWorker(dba, logging.TestingLog(t), s, s, s, s)
+func (s *testWorkerStubs) isRoundSigned(a *require.Assertions, w *Worker, round basics.Round) bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ for _, key := range s.keys {
+ var accountSigExists bool
+ err := w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ accountSigExists, err = sigExistsInDB(tx, round, key.Parent)
+ return err
+ })
+ a.NoError(err)
+ if accountSigExists {
+ return true
+ }
+ }
+
+ return false
+}
+
+func newTestWorkerOnDiskDb(t testing.TB, s *testWorkerStubs) *Worker {
+ fn := fmt.Sprintf("%s.%d", strings.ReplaceAll(t.Name(), "/", "."), crypto.RandUint64())
+
+ ctx, cancel := context.WithCancel(context.Background())
+ return &Worker{
+ spDbFileName: fn,
+ log: logging.TestingLog(t),
+ accts: s,
+ ledger: s,
+ net: s,
+ txnSender: s,
+ provers: make(map[basics.Round]spProver),
+ ctx: ctx,
+ shutdown: cancel,
+ signedCh: make(chan struct{}, 1),
+ }
+
}
func newTestWorker(t testing.TB, s *testWorkerStubs) *Worker {
- dbs, _ := dbOpenTest(t, true)
- return newTestWorkerDB(t, s, dbs.Wdb)
+ worker := newTestWorkerOnDiskDb(t, s)
+ worker.inMemory = true
+ return worker
+}
+
+func newPartKey(t testing.TB, parent basics.Address) account.PersistedParticipation {
+ version := config.Consensus[protocol.ConsensusCurrentVersion]
+ return newPartKeyWithVersion(t, version, parent)
}
// You must call defer part.Close() after calling this function,
// since it creates a DB accessor but the caller must close it (required for mss)
-func newPartKey(t testing.TB, parent basics.Address) account.PersistedParticipation {
+func newPartKeyWithVersion(t testing.TB, protoParam config.ConsensusParams, parent basics.Address) account.PersistedParticipation {
fn := fmt.Sprintf("%s.%d", strings.ReplaceAll(t.Name(), "/", "."), crypto.RandUint64())
partDB, err := db.MakeAccessor(fn, false, true)
require.NoError(t, err)
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- part, err := account.FillDBWithParticipationKeys(partDB, parent, 0, basics.Round(15*proto.StateProofInterval), proto.DefaultKeyDilution)
+ part, err := account.FillDBWithParticipationKeys(partDB, parent, 0, basics.Round(15*protoParam.StateProofInterval), protoParam.DefaultKeyDilution)
require.NoError(t, err)
return part
}
+func countProversInDB(store db.Accessor) (nrows int, err error) {
+ err = store.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ row := tx.QueryRow("SELECT COUNT(*) FROM provers")
+ err := row.Scan(&nrows)
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+
+ return
+}
+
+func expectedNumberOfProvers(stateproofInterval uint64, atRound basics.Round, nextStateProof basics.Round) int {
+ if nextStateProof > atRound {
+ return 0
+ }
+
+ return int((atRound-nextStateProof)/basics.Round(stateproofInterval) + 1)
+}
+
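// For example (hypothetical numbers): with stateproofInterval = 256,
// nextStateProof = 512 and atRound = 1024, provers are still needed for
// rounds 512, 768, and 1024, so expectedNumberOfProvers returns
// (1024-512)/256 + 1 = 3.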
+func createWorkerAndParticipants(t *testing.T, version protocol.ConsensusVersion, proto config.ConsensusParams) ([]account.Participation, *testWorkerStubs, *Worker) {
+ var keys []account.Participation
+ for i := 0; i < 2; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKeyWithVersion(t, proto, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubsWithVersion(t, keys, version, 10)
+ w := newTestWorker(t, s)
+ w.Start()
+ return keys, s, w
+}
+
+// threshold == 0 means nothing was deleted.
+func requireDeletedKeysToBeDeletedBefore(t *testing.T, s *testWorkerStubs, threshold basics.Round) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ for _, prt := range s.keys {
+ if threshold == 0 {
+ require.Equal(t, threshold, s.deletedKeysBeforeRoundMap[prt.ID()])
+ continue
+ }
+ // minus one because we delete keys up to, but not including, the round stated in the map
+ require.Greater(t, threshold, s.deletedKeysBeforeRoundMap[prt.ID()]-1)
+ }
+}
+
func TestWorkerAllSigs(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -296,20 +572,20 @@ func TestWorkerAllSigs(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerStubs(t, keys, len(keys))
+ s := newWorkerStubsWithChannel(t, keys, len(keys))
w := newTestWorker(t, s)
w.Start()
- defer w.Shutdown()
+ defer w.Stop()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
+ // at this point the ledger is at round 511 - we add 2 blocks to pass the state proof interval
+ s.advanceRoundsWithoutStateProof(t, 2)
// Go through several iterations, making sure that we get
// the signatures and certs broadcast at each round.
for iter := 0; iter < 5; iter++ {
- s.advanceLatest(proto.StateProofInterval)
-
- for i := 0; i < len(keys); i++ {
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval-1)
+ for i := 0; i < 2*len(keys); i++ {
// Expect all signatures to be broadcast.
_, err := s.waitOnSigWithTimeout(time.Second * 2)
require.NoError(t, err)
@@ -325,15 +601,9 @@ func TestWorkerAllSigs(t *testing.T) {
if lastAttestedRound < basics.Round(iter+2)*basics.Round(proto.StateProofInterval) {
continue
}
-
require.Equal(t, lastAttestedRound, basics.Round(iter+2)*basics.Round(proto.StateProofInterval))
- stateProofLatestRound, err := s.BlockHdr(lastAttestedRound)
- require.NoError(t, err)
-
- votersRound := lastAttestedRound.SubSaturate(basics.Round(proto.StateProofInterval))
-
- msg, err := GenerateStateProofMessage(s, uint64(votersRound), stateProofLatestRound)
+ msg, err := GenerateStateProofMessage(s, lastAttestedRound)
require.NoError(t, err)
require.Equal(t, msg, tx.Txn.Message)
@@ -350,6 +620,7 @@ func TestWorkerAllSigs(t *testing.T) {
require.NoError(t, err)
break
}
+ s.advanceRoundsAndCreateStateProofs(t, 1)
}
}
@@ -365,14 +636,16 @@ func TestWorkerPartialSigs(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerStubs(t, keys, 10)
+ s := newWorkerStubsWithChannel(t, keys, 10)
w := newTestWorker(t, s)
w.Start()
- defer w.Shutdown()
+ defer w.Stop()
+
+ // at this point the ledger is at round 511 - we add one block so the worker will start to create state proofs
+ s.advanceRoundsWithoutStateProof(t, 1)
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
- s.advanceLatest(proto.StateProofInterval)
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval/2+1)
for i := 0; i < len(keys); i++ {
// Expect all signatures to be broadcast.
@@ -388,7 +661,7 @@ func TestWorkerPartialSigs(t *testing.T) {
}
// Expect a state proof to be formed in the next StateProofInterval/2.
- s.advanceLatest(proto.StateProofInterval / 2)
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval/2)
tx, err := s.waitOnTxnWithTimeout(time.Second * 5)
require.NoError(t, err)
@@ -397,12 +670,7 @@ func TestWorkerPartialSigs(t *testing.T) {
require.Equal(t, tx.Txn.Type, protocol.StateProofTx)
require.Equal(t, lastAttestedRound, 2*basics.Round(proto.StateProofInterval))
- stateProofLatestRound, err := s.BlockHdr(lastAttestedRound)
- require.NoError(t, err)
-
- votersRound := lastAttestedRound.SubSaturate(basics.Round(proto.StateProofInterval))
-
- msg, err := GenerateStateProofMessage(s, uint64(votersRound), stateProofLatestRound)
+ msg, err := GenerateStateProofMessage(s, lastAttestedRound)
require.NoError(t, err)
require.Equal(t, msg, tx.Txn.Message)
@@ -430,13 +698,13 @@ func TestWorkerInsufficientSigs(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerStubs(t, keys, 10)
+ s := newWorkerStubsWithChannel(t, keys, 10)
w := newTestWorker(t, s)
w.Start()
- defer w.Shutdown()
+ defer w.Stop()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.advanceLatest(3 * proto.StateProofInterval)
+ s.advanceRoundsWithoutStateProof(t, 3*proto.StateProofInterval)
for i := 0; i < len(keys); i++ {
// Expect all signatures to be broadcast.
@@ -454,6 +722,8 @@ func TestWorkerInsufficientSigs(t *testing.T) {
func TestWorkerRestart(t *testing.T) {
partitiontest.PartitionTest(t)
+ a := require.New(t)
+ const expectedStateProofs = 5
var keys []account.Participation
for i := 0; i < 10; i++ {
@@ -464,35 +734,39 @@ func TestWorkerRestart(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerStubs(t, keys, 10)
+ s := newWorkerStubsWithChannel(t, keys, len(keys))
+ s.sigmsg = nil
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Stop()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.advanceLatest(3*proto.StateProofInterval - 1)
-
- dbRand := crypto.RandUint64()
-
- formedAt := -1
- for i := 0; formedAt < 0 && i < len(keys); i++ {
- // Give one key at a time to the worker, and then shut it down,
- // to make sure that it will correctly save and restore these
- // signatures across restart.
- s.keys = keys[i : i+1]
- dbs, _ := dbOpenTestRand(t, true, dbRand)
- w := newTestWorkerDB(t, s, dbs.Wdb)
+ s.advanceRoundsWithoutStateProof(t, 1)
+ lastRound := uint64(0)
+ for i := 0; i < expectedStateProofs; i++ {
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval/2-1)
+ w.Stop()
w.Start()
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval/2)
+
+ var tx transactions.SignedTxn
+ // there may be several state proof txns; we extract them
+ for {
+ var err error
+ tx, err = s.waitOnTxnWithTimeout(time.Second * 5)
+ a.NoError(err)
+ if lastRound == 0 || lastRound < tx.Txn.Message.LastAttestedRound {
+ break
+ }
- // Check if the cert formed
- select {
- case <-s.txmsg:
- formedAt = i
- case <-time.After(time.Second):
}
- w.Shutdown()
+ // since a state proof txn was created, we update the header with the next state proof round
+ // i.e. the network has accepted the state proof.
+ s.addBlock(basics.Round(tx.Txn.Message.LastAttestedRound + proto.StateProofInterval))
+ lastRound = tx.Txn.Message.LastAttestedRound
}
-
- require.True(t, formedAt > 1)
- require.True(t, formedAt < 5)
+ a.Equal(uint64(expectedStateProofs+1), lastRound/proto.StateProofInterval)
}
func TestWorkerHandleSig(t *testing.T) {
@@ -507,13 +781,13 @@ func TestWorkerHandleSig(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerStubs(t, keys, 10)
+ s := newWorkerStubsWithChannel(t, keys, 10)
w := newTestWorker(t, s)
w.Start()
- defer w.Shutdown()
+ defer w.Stop()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.advanceLatest(3 * proto.StateProofInterval)
+ s.advanceRoundsWithoutStateProof(t, 3*proto.StateProofInterval)
for i := 0; i < len(keys); i++ {
// Expect all signatures to be broadcast.
@@ -530,12 +804,12 @@ func TestWorkerHandleSig(t *testing.T) {
}
}
-func TestSignerDeletesUnneededStateProofKeys(t *testing.T) {
+func TestWorkerIgnoresSignatureForNonCacheProvers(t *testing.T) {
partitiontest.PartitionTest(t)
+ a := require.New(t)
var keys []account.Participation
- nParticipants := 2
- for i := 0; i < nParticipants; i++ {
+ for i := 0; i < 2; i++ {
var parent basics.Address
crypto.RandBytes(parent[:])
p := newPartKey(t, parent)
@@ -546,59 +820,176 @@ func TestSignerDeletesUnneededStateProofKeys(t *testing.T) {
s := newWorkerStubs(t, keys, 10)
w := newTestWorker(t, s)
w.Start()
- defer w.Shutdown()
+ defer w.Stop()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.advanceLatest(3 * proto.StateProofInterval)
- // Expect all signatures to be broadcast.
+ targetRound := (proversCacheLength + 1) * proto.StateProofInterval
+
+ s.advanceRoundsWithoutStateProof(t, targetRound)
+
+	// clear the provers cache and the signatures database so the handler will accept our signatures.
+ s.mu.Lock()
+ w.provers = make(map[basics.Round]spProver)
+ a.NoError(w.db.Atomic(func(_ context.Context, tx *sql.Tx) error {
+ _, err := tx.Exec("DELETE from sigs")
+ return err
+ }))
+ s.mu.Unlock()
+
+	// rounds [2*proto.StateProofInterval, 3*proto.StateProofInterval, ..., proversCacheLength*proto.StateProofInterval]
+	// should be accepted by handleSig
+ i := uint64(0)
+ for ; i < (proversCacheLength - 1); i++ {
+ fwd, err := sendSigToHandler(proto, i, w, a, s)
+ a.Equal(network.Broadcast, fwd)
+ a.NoError(err)
+ }
+
+	// signature for (proversCacheLength+1)*proto.StateProofInterval should be rejected - due to the cache limit
+ fwd, err := sendSigToHandler(proto, i, w, a, s)
+ a.Equal(network.Ignore, fwd)
+ a.NoError(err)
+ i++
+
+	// the signature for the newest expected state proof round should be accepted
+ fwd, err = sendSigToHandler(proto, i, w, a, s)
+ a.Equal(network.Broadcast, fwd)
+ a.NoError(err)
- require.Zero(t, s.GetNumDeletedKeys())
- w.signStateProof(s.blocks[basics.Round(proto.StateProofInterval)])
- require.Equal(t, s.GetNumDeletedKeys(), nParticipants)
}
-func TestSignerDoesntDeleteKeysWhenDBDoesntStoreSigs(t *testing.T) {
- partitiontest.PartitionTest(t)
+func sendSigToHandler(proto config.ConsensusParams, i uint64, w *Worker, a *require.Assertions, s *testWorkerStubs) (network.ForwardingPolicy, error) {
+ rnd := basics.Round(2*proto.StateProofInterval + i*proto.StateProofInterval)
+ stateproofMessage, err := GenerateStateProofMessage(w.ledger, rnd)
+ a.NoError(err)
- var keys []account.Participation
- for i := 0; i < 2; i++ {
- var parent basics.Address
- crypto.RandBytes(parent[:])
- p := newPartKey(t, parent)
- defer p.Close()
- keys = append(keys, p.Participation)
+ hashedStateproofMessage := stateproofMessage.Hash()
+ spRecords := s.StateProofKeys(rnd)
+ sig, err := spRecords[0].StateProofSecrets.SignBytes(hashedStateproofMessage[:])
+ a.NoError(err)
+
+ msg := sigFromAddr{
+ SignerAddress: spRecords[0].Account,
+ Round: rnd,
+ Sig: sig,
}
- s := newWorkerStubs(t, keys, 10)
- dbs, _ := dbOpenTest(t, true)
+ fwd, err := w.handleSig(msg, msg.SignerAddress)
+ return fwd, err
+}
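+
+ // The acceptance pattern above reflects the worker's bounded provers cache:
+ // it holds the oldest (proversCacheLength-1) outstanding state proof rounds
+ // plus the newest expected round. A minimal sketch of that predicate, under
+ // the assumption that this is the whole policy (isRoundCached is illustrative,
+ // not an identifier from this change):
+ //
+ //	func isRoundCached(rnd, nextStateProofRnd, latest basics.Round, interval uint64) bool {
+ //		newest := latest.RoundDownToMultipleOf(basics.Round(interval))
+ //		oldestWindowEnd := nextStateProofRnd + basics.Round((proversCacheLength-2)*interval)
+ //		return rnd == newest || (rnd >= nextStateProofRnd && rnd <= oldestWindowEnd)
+ //	}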
- logger := logging.NewLogger()
- logger.SetOutput(io.Discard)
+func TestKeysRemoveOnlyAfterStateProofAccepted(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ const expectedNumberOfStateProofs = uint64(3)
+ firstExpectedStateproof := basics.Round(proto.StateProofInterval * 2)
- w := NewWorker(dbs.Wdb, logger, s, s, s, s)
+ keys, s, w := createWorkerAndParticipants(t, protocol.ConsensusCurrentVersion, proto)
+ defer w.Stop()
- w.Start()
- defer w.Shutdown()
+ s.advanceRoundsWithoutStateProof(t, expectedNumberOfStateProofs*proto.StateProofInterval)
+
+	// since no state proof was confirmed (i.e., the next state proof round == firstExpectedStateproof), we expect a node
+	// to keep its keys so it can sign the state proof for firstExpectedStateproof. every participant should have keys for that round
+	checkedKeys := s.StateProofKeys(firstExpectedStateproof)
+	require.Equal(t, len(keys), len(checkedKeys))
+	requireDeletedKeysToBeDeletedBefore(t, s, firstExpectedStateproof)
+
+ s.advanceRoundsAndCreateStateProofs(t, proto.StateProofInterval)
+
+	// the first state proof was confirmed, so keys for that state proof can be removed.
+	// We should still be holding the keys for firstExpectedStateproof + proto.StateProofInterval
+ requireDeletedKeysToBeDeletedBefore(t, s, firstExpectedStateproof+basics.Round(proto.StateProofInterval))
+ checkedKeys = s.StateProofKeys(firstExpectedStateproof)
+ require.Equal(t, 0, len(checkedKeys))
+ checkedKeys = s.StateProofKeys(firstExpectedStateproof + basics.Round(proto.StateProofInterval))
+ require.Equal(t, len(keys), len(checkedKeys))
+}
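+
+ // A compact statement of the rule this test pins down, ignoring key reuse
+ // across intervals (covered by the small-interval variant below); the helper
+ // name is illustrative, not part of this change:
+ //
+ //	// keys covering rnd may be removed only once a state proof attesting to
+ //	// rnd has been accepted, i.e. once rnd < stateProofNextRound
+ //	func keysRemovable(rnd, stateProofNextRound basics.Round) bool {
+ //		return rnd < stateProofNextRound
+ //	}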
+
+func TestKeysRemoveOnlyAfterStateProofAcceptedSmallIntervals(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const stateProofIntervalForTest = 64
+ const smallIntervalVersionName = "TestKeysRemoveOnlyAfterStateProofAcceptedSmallIntervals"
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.advanceLatest(3 * proto.StateProofInterval)
- // Expect all signatures to be broadcast.
+ proto.StateProofInterval = stateProofIntervalForTest
+ config.Consensus[smallIntervalVersionName] = proto
+ defer func() {
+ delete(config.Consensus, smallIntervalVersionName)
+ }()
- require.NoError(t, w.db.Atomic(
- func(ctx context.Context, tx *sql.Tx) error {
- _, err := tx.Exec("DROP TABLE sigs")
- return err
- }),
- )
+
+ const expectedNumberOfStateProofs = uint64(3)
+ firstExpectedStateproof := basics.Round(proto.StateProofInterval * 2)
+
+ keys, s, w := createWorkerAndParticipants(t, smallIntervalVersionName, proto)
+ defer w.Stop()
+
+ s.advanceRoundsWithoutStateProof(t, expectedNumberOfStateProofs*proto.StateProofInterval)
+
+	// since no state proof was confirmed (i.e., the next state proof round == firstExpectedStateproof), we expect a node
+	// to keep its keys so it can sign the state proof for firstExpectedStateproof. every participant should have keys for that round
+ requireDeletedKeysToBeDeletedBefore(t, s, 0)
+ checkedKeys := s.StateProofKeys(firstExpectedStateproof)
+ require.Equal(t, len(keys), len(checkedKeys))
+
+ // confirm stateproof for firstExpectedStateproof
+ s.advanceRoundsAndCreateStateProofs(t, proto.StateProofInterval)
+
+	// the first state proof was confirmed. However, since the key lifetime is greater than the state proof interval,
+	// the key for firstExpectedStateproof is kept (it is reused at 3 * proto.StateProofInterval)
+ requireDeletedKeysToBeDeletedBefore(t, s, 0)
+
+ checkedKeys = s.StateProofKeys(firstExpectedStateproof)
+ require.Equal(t, len(keys), len(checkedKeys))
+ checkedKeys = s.StateProofKeys(firstExpectedStateproof + basics.Round(proto.StateProofInterval))
+ require.Equal(t, len(keys), len(checkedKeys))
+}
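+
+ // Why nothing is deleted here: merkle signature keys are generated once per
+ // KeyLifetime rounds (256 by default, an assumption about the key scheme's
+ // defaults rather than something stated in this change), so with a 64-round
+ // state proof interval one key signs rounds 128, 192 and 256 alike; confirming
+ // the proof for round 128 cannot free the key still needed for round 192.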
+
+func TestKeysRemoveOnlyAfterStateProofAcceptedLargeIntervals(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const stateProofIntervalForTest = 260
+	const largeIntervalVersionName = "TestKeysRemoveOnlyAfterStateProofAcceptedLargeIntervals"
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ proto.StateProofInterval = stateProofIntervalForTest
+	config.Consensus[largeIntervalVersionName] = proto
+ defer func() {
+		delete(config.Consensus, largeIntervalVersionName)
+ }()
+
+ const expectedNumberOfStateProofs = uint64(3)
+ firstExpectedStateproof := basics.Round(proto.StateProofInterval * 2)
+
+	keys, s, w := createWorkerAndParticipants(t, largeIntervalVersionName, proto)
+ defer w.Stop()
- w.signStateProof(s.blocks[3*basics.Round(proto.StateProofInterval)])
- require.Zero(t, s.GetNumDeletedKeys())
+ s.advanceRoundsWithoutStateProof(t, expectedNumberOfStateProofs*proto.StateProofInterval)
+	// since no state proof was confirmed (i.e., the next state proof round == firstExpectedStateproof), we expect a node
+	// to keep its keys so it can sign the state proof for firstExpectedStateproof. every participant should have keys for that round
+ requireDeletedKeysToBeDeletedBefore(t, s, firstExpectedStateproof)
+ checkedKeys := s.StateProofKeys(firstExpectedStateproof)
+ require.Equal(t, len(keys), len(checkedKeys))
+
+ // confirm stateproof for firstExpectedStateproof
+ s.advanceRoundsAndCreateStateProofs(t, proto.StateProofInterval)
+
+	// the first state proof was confirmed, so keys for that state proof can be removed
+ requireDeletedKeysToBeDeletedBefore(t, s, basics.Round(proto.StateProofInterval)+firstExpectedStateproof)
+
+ checkedKeys = s.StateProofKeys(firstExpectedStateproof)
+ require.Equal(t, 0, len(checkedKeys))
+ checkedKeys = s.StateProofKeys(firstExpectedStateproof + basics.Round(proto.StateProofInterval))
+ require.Equal(t, len(keys), len(checkedKeys))
}
-func TestWorkerRemoveBuildersAndSignatures(t *testing.T) {
+func TestWorkersProversCacheAndSignatures(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- const expectedStateProofs = 8
+ const expectedStateProofs = proversCacheLength + 2
var keys []account.Participation
for i := 0; i < 10; i++ {
var parent basics.Address
@@ -611,53 +1002,73 @@ func TestWorkerRemoveBuildersAndSignatures(t *testing.T) {
s := newWorkerStubs(t, keys, len(keys))
w := newTestWorker(t, s)
w.Start()
- defer w.Shutdown()
+ defer w.Stop()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
- for iter := 0; iter < expectedStateProofs; iter++ {
- s.advanceLatest(proto.StateProofInterval)
- tx := <-s.txmsg
- a.Equal(tx.Txn.Type, protocol.StateProofTx)
+	// we break the loop into two parts since we don't want to add a state proof round (Round % 256 == 0)
+ for iter := 0; iter < expectedStateProofs-1; iter++ {
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval)
}
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval/2)
+
+ a.Equal(proversCacheLength, len(w.provers))
+ verifyProverCache(proto, w, a, expectedStateProofs)
- err := waitForBuilderAndSignerToWaitOnRound(s)
+ countDB, err := countProversInDB(w.db)
a.NoError(err)
- a.Equal(expectedStateProofs, len(w.builders))
+ a.Equal(expectedStateProofs, countDB)
+ threshold := onlineProversThreshold(&proto, basics.Round(512)) // 512 since no StateProofs are confirmed yet (512 is the first, commitment at 256)
var roundSigs map[basics.Round][]pendingSig
err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- roundSigs, err = getPendingSigs(tx)
+ roundSigs, err = getPendingSigs(tx, threshold, basics.Round(256+proto.StateProofInterval*expectedStateProofs), false)
return
})
+ a.NoError(err)
+ a.Equal(proversCacheLength, len(roundSigs)) // Number of broadcasted sigs should be the same as number of (online) cached provers.
- a.Equal(expectedStateProofs, len(roundSigs))
-
- // add block that confirm a state proof for interval: expectedStateProofs - 1
- s.mu.Lock()
- s.addBlock(basics.Round((expectedStateProofs - 1) * config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval))
- s.mu.Unlock()
+	// add a block that confirms a state proof for interval: expectedStateProofs
+ s.addBlock(basics.Round((expectedStateProofs) * config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval))
+ s.waitForSignerAndBuilder(t)
- err = waitForBuilderAndSignerToWaitOnRound(s)
+ count := expectedNumberOfProvers(proto.StateProofInterval, s.latest, basics.Round((expectedStateProofs)*config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval))
+ countDB, err = countProversInDB(w.db)
a.NoError(err)
- a.Equal(3, len(w.builders))
+ a.Equal(count, countDB)
+ threshold = onlineProversThreshold(&proto, s.blocks[s.latest].StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
+ maxStateProofRnd := s.latest.RoundDownToMultipleOf(basics.Round(proto.StateProofInterval))
err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- roundSigs, err = getPendingSigs(tx)
+ roundSigs, err = getPendingSigs(tx, threshold, maxStateProofRnd, false)
return
})
+ a.NoError(err)
+ a.Equal(count, len(roundSigs))
+}
- a.Equal(3, len(roundSigs))
+func verifyProverCache(proto config.ConsensusParams, w *Worker, a *require.Assertions, expectedStateProofs uint64) {
+ for i := uint64(0); i < proversCacheLength-1; i++ {
+ rnd := proto.StateProofInterval*2 + proto.StateProofInterval*i
+ _, exists := w.provers[basics.Round(rnd)]
+ a.True(exists)
+ }
+ _, exists := w.provers[basics.Round(proto.StateProofInterval*(expectedStateProofs+1))]
+ a.True(exists)
}
-func TestWorkerBuildersRecoveryLimit(t *testing.T) {
+// TestSignatureBroadcastPolicy makes sure that during half of a state proof interval, every online account
+// will broadcast only proversCacheLength signatures
+func TestSignatureBroadcastPolicy(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ const numberOfParticipants = 5
+ const expectedStateProofs = proversCacheLength + 2
var keys []account.Participation
- for i := 0; i < 10; i++ {
+ for i := 0; i < numberOfParticipants; i++ {
var parent basics.Address
crypto.RandBytes(parent[:])
p := newPartKey(t, parent)
@@ -668,83 +1079,98 @@ func TestWorkerBuildersRecoveryLimit(t *testing.T) {
s := newWorkerStubs(t, keys, len(keys))
w := newTestWorker(t, s)
w.Start()
- defer w.Shutdown()
+ defer w.Stop()
- s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
- for iter := uint64(0); iter < proto.StateProofMaxRecoveryIntervals+1; iter++ {
- s.advanceLatest(proto.StateProofInterval)
- tx := <-s.txmsg
- a.Equal(tx.Txn.Type, protocol.StateProofTx)
+ for iter := 0; iter < expectedStateProofs-1; iter++ {
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval)
}
+ // set the latest block to be at round r, where r % 256 == 0
+ s.advanceRoundsWithoutStateProof(t, 1)
- // since this test involves go routine, we would like to make sure that when
- // we sample the builder it already processed our current round.
- // in order to that, we wait for singer and the builder to wait.
- // then we push one more round so the builder could process it (since the builder might skip rounds)
- err := waitForBuilderAndSignerToWaitOnRound(s)
- a.NoError(err)
- s.mu.Lock()
- s.addBlock(basics.Round(proto.StateProofInterval * 2))
- s.mu.Unlock()
- err = waitForBuilderAndSignerToWaitOnRound(s)
- a.NoError(err)
+ checkSignatureBroadcastHalfInterval(t, proto, expectedStateProofs, s, numberOfParticipants, a)
+ checkSignatureBroadcastHalfInterval(t, proto, expectedStateProofs, s, numberOfParticipants, a)
+}
- // should not give up on rounds
- a.Equal(proto.StateProofMaxRecoveryIntervals+1, uint64(len(w.builders)))
+func checkSignatureBroadcastHalfInterval(t *testing.T, proto config.ConsensusParams, expectedStateProofs uint64, s *testWorkerStubs, numberOfParticipants int, a *require.Assertions) {
+ roundSigs := make(map[basics.Round]int)
+ for i := uint64(2); i < proversCacheLength; i++ {
+ roundSigs[basics.Round(i*proto.StateProofInterval)] = 0
+ }
+ roundSigs[basics.Round((expectedStateProofs+1)*proto.StateProofInterval)] = 0
- var roundSigs map[basics.Round][]pendingSig
- err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- roundSigs, err = getPendingSigs(tx)
- return
- })
- a.Equal(proto.StateProofMaxRecoveryIntervals+1, uint64(len(roundSigs)))
+ // empty all pending sigs
+ s.sigmsg = make(chan []byte, 1024*1024)
- s.advanceLatest(proto.StateProofInterval)
- tx := <-s.txmsg
- a.Equal(tx.Txn.Type, protocol.StateProofTx)
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval/2)
+ for i := 0; i < numberOfParticipants*proversCacheLength; i++ {
+ sigMessage := sigFromAddr{}
+ sigMessageBytes, err := s.waitOnSigWithTimeout(time.Second * 2)
+ a.NoError(err)
- err = waitForBuilderAndSignerToWaitOnRound(s)
- a.NoError(err)
- s.mu.Lock()
- s.addBlock(basics.Round(proto.StateProofInterval * 2))
- s.mu.Unlock()
- err = waitForBuilderAndSignerToWaitOnRound(s)
- a.NoError(err)
+ err = protocol.Decode(sigMessageBytes, &sigMessage)
+ a.NoError(err)
+
+ roundSigs[sigMessage.Round]++
+ }
+
+ a.Equal(proversCacheLength, len(roundSigs))
+ for _, numOfSignatures := range roundSigs {
+ a.Equal(numberOfParticipants, numOfSignatures)
+ }
+}
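+
+ // Sanity arithmetic for the check above: each online participant broadcasts
+ // one signature per cached prover round, so a half interval should yield
+ // exactly numberOfParticipants*proversCacheLength messages. Assuming a cache
+ // length of 5 (the constant's value is not shown in this change), the 5
+ // participants here produce 25 messages, 5 per round.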
+
+func TestWorkerDoesNotLimitProversAndSignaturesOnDisk(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, len(keys))
+ w := newTestWorker(t, s)
+ w.Start()
+ defer w.Stop()
+
+ for iter := uint64(0); iter < proto.StateProofMaxRecoveryIntervals+1; iter++ {
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval)
+ }
// should not give up on rounds
- a.Equal(proto.StateProofMaxRecoveryIntervals+1, uint64(len(w.builders)))
+ a.Equal(proversCacheLength, len(w.provers))
+ countDB, err := countProversInDB(w.db)
+ a.NoError(err)
+ a.Equal(proto.StateProofMaxRecoveryIntervals+1, uint64(countDB))
- roundSigs = make(map[basics.Round][]pendingSig)
- err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- roundSigs, err = getPendingSigs(tx)
- return
- })
- a.Equal(proto.StateProofMaxRecoveryIntervals+1, uint64(len(roundSigs)))
+ sigsCount := countAllSignaturesInDB(t, w.db)
+ a.Equal(proto.StateProofMaxRecoveryIntervals+1, sigsCount)
}
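+
+ // The asymmetry checked above, in numbers (assuming, say,
+ // StateProofMaxRecoveryIntervals = 10 and proversCacheLength = 5): after 11
+ // unproven intervals the database holds 11 provers and 11 signature rows,
+ // while memory holds only the 5 cached provers, so no signing work is lost
+ // even when recovery spans many intervals.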
-func waitForBuilderAndSignerToWaitOnRound(s *testWorkerStubs) error {
- const maxRetries = 10000
- i := 0
- for {
- s.mu.Lock()
- r := s.latest + 1
- // in order to make sure the builder and the signer are waiting for a round we need to make sure
- // that round r has c channel and r +1 doesn't have.
- // we also want to make sure that the builder and the singer are waiting
- isWaitingForRound := s.waiters[r] != nil && s.waiters[r+1] == nil
- isWaitingForRound = isWaitingForRound && (s.waitersCount[r] == 2)
- s.mu.Unlock()
- if !isWaitingForRound {
- if i == maxRetries {
- return fmt.Errorf("timeout while waiting for round")
- }
- i++
- time.Sleep(time.Millisecond)
- continue
+func countAllSignaturesInDB(t *testing.T, accessor db.Accessor) uint64 {
+ var roundSigs map[basics.Round][]pendingSig
+ err := accessor.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ query := "SELECT sprnd, signer, sig, from_this_node FROM sigs "
+ rows, err := tx.Query(query)
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+ roundSigs, err = rowsToPendingSigs(rows)
+ if err != nil {
+ return err
}
return nil
- }
+ })
+ require.NoError(t, err)
+ return uint64(len(roundSigs))
}
type sigOrigin int
@@ -778,14 +1204,11 @@ func getSignaturesInDatabase(t *testing.T, numAddresses int, sigFrom sigOrigin)
signatureBcasted[parent] = 0
}
- tns = newWorkerStubs(t, keys, len(keys))
- spw = newTestWorker(t, tns)
+ tns = newWorkerStubsWithChannel(t, keys, len(keys))
+ spw = newTestWorkerOnDiskDb(t, tns)
- // Prepare the database
- err := spw.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- return initDB(tx)
- })
- require.NoError(t, err)
+	// we don't need the goroutines to run, so just create the db
+ spw.initDb(false)
// All the keys are for round 255. This way, starting the period at 256,
// there will be no disqualified signatures from broadcasting because they are
@@ -835,6 +1258,8 @@ func getSignaturesInDatabase(t *testing.T, numAddresses int, sigFrom sigOrigin)
func TestSigBroacastTwoPerSig(t *testing.T) {
partitiontest.PartitionTest(t)
signatureBcasted, fromThisNode, tns, spw := getSignaturesInDatabase(t, 10, sigAlternateOrigin)
+ defer os.Remove(spw.spDbFileName)
+ defer spw.db.Close()
for periods := 1; periods < 10; periods += 3 {
sendReceiveCountMessages(t, tns, signatureBcasted, fromThisNode, spw, periods)
@@ -867,7 +1292,7 @@ func sendReceiveCountMessages(t *testing.T, tns *testWorkerStubs, signatureBcast
// Broadcast the messages
for brnd := 257; brnd < 257+int(proto.StateProofInterval)*periods; brnd++ {
- spw.broadcastSigs(basics.Round(brnd), proto)
+ spw.broadcastSigs(basics.Round(brnd), basics.Round(512), proto)
}
close(tns.sigmsg)
@@ -883,7 +1308,7 @@ func sendReceiveCountMessages(t *testing.T, tns *testWorkerStubs, signatureBcast
}
}
-func TestBuilderGeneratesValidStateProofTXN(t *testing.T) {
+func TestProverGeneratesValidStateProofTXN(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
@@ -896,15 +1321,14 @@ func TestBuilderGeneratesValidStateProofTXN(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerStubs(t, keys, len(keys))
+ s := newWorkerStubsWithChannel(t, keys, len(keys))
w := newTestWorker(t, s)
w.Start()
- defer w.Shutdown()
+ defer w.Stop()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- s.advanceLatest(proto.StateProofInterval + proto.StateProofInterval/2)
- s.advanceLatest(proto.StateProofInterval)
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval)
for i := 0; i < len(keys); i++ {
// Expect all signatures to be broadcast.
@@ -924,10 +1348,13 @@ func TestForwardNotFromThisNodeSecondHalf(t *testing.T) {
partitiontest.PartitionTest(t)
_, _, tns, spw := getSignaturesInDatabase(t, 10, sigNotFromThisNode)
+ defer os.Remove(spw.spDbFileName)
+ defer spw.db.Close()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
for brnd := 0; brnd < int(proto.StateProofInterval*10); brnd++ {
- spw.broadcastSigs(basics.Round(brnd), proto)
+ stateProofNextRound := basics.Round(brnd).RoundDownToMultipleOf(basics.Round(proto.StateProofInterval))
+ spw.broadcastSigs(basics.Round(brnd), stateProofNextRound, proto)
select {
case <-tns.sigmsg:
// The message is broadcast in the second half of the period
@@ -943,10 +1370,13 @@ func TestForwardNotFromThisNodeFirstHalf(t *testing.T) {
partitiontest.PartitionTest(t)
signatureBcasted, fromThisNode, tns, spw := getSignaturesInDatabase(t, 10, sigAlternateOrigin)
+ defer os.Remove(spw.spDbFileName)
+ defer spw.db.Close()
proto := config.Consensus[protocol.ConsensusCurrentVersion]
for brnd := 0; brnd < int(proto.StateProofInterval*10); brnd++ {
- spw.broadcastSigs(basics.Round(brnd), proto)
+ stateProofNextRound := basics.Round(brnd).RoundDownToMultipleOf(basics.Round(proto.StateProofInterval))
+ spw.broadcastSigs(basics.Round(brnd), stateProofNextRound, proto)
select {
case bMsg := <-tns.sigmsg:
sfa := sigFromAddr{}
@@ -981,10 +1411,9 @@ func setBlocksAndMessage(t *testing.T, sigRound basics.Round) (s *testWorkerStub
s = newWorkerStubs(t, []account.Participation{p.Participation}, 10)
w = newTestWorker(t, s)
+ w.initDb(w.inMemory)
- for r := 0; r < int(proto.StateProofInterval)*2; r++ {
- s.addBlock(basics.Round(proto.StateProofInterval * 2))
- }
+ s.addBlock(basics.Round(proto.StateProofInterval * 2))
msg = sigFromAddr{
SignerAddress: address,
@@ -1028,12 +1457,7 @@ func TestWorkerHandleSigRoundNotInLedger(t *testing.T) {
fwd, err := w.handleSig(msg, msg.SignerAddress)
require.Equal(t, network.Ignore, fwd)
- expected := ledgercore.ErrNoEntry{
- Round: msg.Round,
- Latest: w.ledger.Latest(),
- Committed: w.ledger.Latest(),
- }
- require.Equal(t, expected, err)
+ require.ErrorContains(t, err, "latest round is smaller than given")
}
// relays reject signatures for wrong message (sig verification fails)
@@ -1051,10 +1475,7 @@ func TestWorkerHandleSigWrongSignature(t *testing.T) {
fwd, err := w.handleSig(msg, msg.SignerAddress)
require.Equal(t, network.Disconnect, fwd)
- expected2 := fmt.Errorf("%w: %v",
- merklesignature.ErrSignatureSchemeVerificationFailed,
- merklearray.ErrRootMismatch)
- require.Equal(t, expected2, err)
+ require.ErrorIs(t, err, errSignatureVerification)
}
// relays reject signatures for address not in top N
@@ -1076,8 +1497,9 @@ func TestWorkerHandleSigAddrsNotInTopN(t *testing.T) {
keys = append(keys, p.Participation)
}
- s := newWorkerStubs(t, keys[0:proto.StateProofTopVoters], 10)
+ s := newWorkerStubsWithChannel(t, keys[0:proto.StateProofTopVoters], 10)
w := newTestWorker(t, s)
+ w.initDb(w.inMemory)
for r := 0; r < int(proto.StateProofInterval)*2; r++ {
s.addBlock(basics.Round(r))
@@ -1097,12 +1519,10 @@ func TestWorkerHandleSigAddrsNotInTopN(t *testing.T) {
fwd, err := w.handleSig(msg, msg.SignerAddress)
require.Equal(t, network.Disconnect, fwd)
- expected3 := fmt.Errorf("handleSig: %v not in participants for %d",
- msg.SignerAddress, msg.Round)
- require.Equal(t, expected3, err)
+ require.ErrorIs(t, err, errAddressNotInVoters)
}
-// Signature already part of the builderForRound, ignore
+// Signature already part of the proverForRound, ignore
func TestWorkerHandleSigAlreadyIn(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1110,9 +1530,7 @@ func TestWorkerHandleSigAlreadyIn(t *testing.T) {
lastRound := proto.StateProofInterval * 2
s, w, msg, _ := setBlocksAndMessage(t, basics.Round(lastRound))
- latestBlockHeader, err := w.ledger.BlockHdr(basics.Round(lastRound))
- require.NoError(t, err)
- stateproofMessage, err := GenerateStateProofMessage(w.ledger, proto.StateProofInterval, latestBlockHeader)
+ stateproofMessage, err := GenerateStateProofMessage(w.ledger, basics.Round(lastRound))
require.NoError(t, err)
hashedStateproofMessage := stateproofMessage.Hash()
@@ -1122,9 +1540,7 @@ func TestWorkerHandleSigAlreadyIn(t *testing.T) {
msg.Sig = sig
// Create the database
- err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) error {
- return initDB(tx)
- })
+ err = makeStateProofDB(w.db)
require.NoError(t, err)
msgBytes := protocol.Encode(&msg)
@@ -1134,7 +1550,7 @@ func TestWorkerHandleSigAlreadyIn(t *testing.T) {
})
require.Equal(t, network.OutgoingMessage{Action: network.Broadcast}, reply)
- // The sig is already there. Shoud get error
+ // The sig is already there. Should get error
reply = w.handleSigMessage(network.IncomingMessage{
Data: msgBytes,
})
@@ -1152,9 +1568,13 @@ func TestWorkerHandleSigExceptionsDbError(t *testing.T) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
lastRound := proto.StateProofInterval * 2
s, w, msg, _ := setBlocksAndMessage(t, basics.Round(lastRound))
- latestBlockHeader, err := w.ledger.BlockHdr(basics.Round(lastRound))
- require.NoError(t, err)
- stateproofMessage, err := GenerateStateProofMessage(w.ledger, proto.StateProofInterval, latestBlockHeader)
+
+ require.NoError(t, w.db.Atomic(func(_ context.Context, tx *sql.Tx) error {
+ _, err := tx.Exec("drop table sigs ")
+ return err
+ }))
+
+ stateproofMessage, err := GenerateStateProofMessage(w.ledger, basics.Round(lastRound))
require.NoError(t, err)
hashedStateproofMessage := stateproofMessage.Hash()
@@ -1174,17 +1594,11 @@ func TestWorkerHandleSigExceptionsDbError(t *testing.T) {
require.Contains(t, "no such table: sigs", err.Error())
}
-// relays reject signatures when could not makeBuilderForRound
-func TestWorkerHandleSigCantMakeBuilder(t *testing.T) {
+// relays reject signatures when could not createAndPersistProver
+func TestWorkerHandleSigCantMakeProver(t *testing.T) {
partitiontest.PartitionTest(t)
proto := config.Consensus[protocol.ConsensusCurrentVersion]
- origProto := proto
- defer func() {
- config.Consensus[protocol.ConsensusCurrentVersion] = origProto
- }()
- proto.StateProofInterval = 512
- config.Consensus[protocol.ConsensusCurrentVersion] = proto
var address basics.Address
crypto.RandBytes(address[:])
@@ -1193,16 +1607,19 @@ func TestWorkerHandleSigCantMakeBuilder(t *testing.T) {
s := newWorkerStubs(t, []account.Participation{p.Participation}, 10)
w := newTestWorker(t, s)
+ w.Start()
+ defer w.Stop()
- for r := 0; r < int(proto.StateProofInterval)*2; r++ {
- s.addBlock(basics.Round(512))
- }
+ s.addBlock(basics.Round(proto.StateProofInterval * 2))
+
+ s.mu.Lock()
// remove the first block from the ledger
- delete(s.blocks, 0)
+ delete(s.blocks, 256)
+ s.mu.Unlock()
msg := sigFromAddr{
SignerAddress: address,
- Round: basics.Round(proto.StateProofInterval),
+ Round: basics.Round(proto.StateProofInterval * 2),
Sig: merklesignature.Signature{},
}
@@ -1215,14 +1632,14 @@ func TestWorkerHandleSigCantMakeBuilder(t *testing.T) {
fwd, err := w.handleSig(msg, msg.SignerAddress)
require.Equal(t, network.Ignore, fwd)
expected := ledgercore.ErrNoEntry{
- Round: 0,
+ Round: 256,
Latest: w.ledger.Latest(),
Committed: w.ledger.Latest(),
}
require.Equal(t, expected, err)
}
-// relays reject signiture for a round where StateProofInterval is 0
+// relays reject signature for a round where StateProofInterval is 0
func TestWorkerHandleSigIntervalZero(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1234,8 +1651,7 @@ func TestWorkerHandleSigIntervalZero(t *testing.T) {
proto.StateProofInterval = 0
config.Consensus[protocol.ConsensusCurrentVersion] = proto
- intervalRound := basics.Round(proto.StateProofInterval)
- _, w, msg, msgBytes := setBlocksAndMessage(t, intervalRound*2)
+ _, w, msg, msgBytes := setBlocksAndMessage(t, 1)
reply := w.handleSigMessage(network.IncomingMessage{
Data: msgBytes,
@@ -1249,7 +1665,7 @@ func TestWorkerHandleSigIntervalZero(t *testing.T) {
require.Equal(t, expected, err)
}
-// relays reject signiture for a round not multiple of StateProofInterval
+// relays reject signature for a round not multiple of StateProofInterval
func TestWorkerHandleSigNotOnInterval(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -1289,3 +1705,326 @@ func TestWorkerHandleSigCorrupt(t *testing.T) {
})
require.Equal(t, network.OutgoingMessage{Action: network.Disconnect}, reply)
}
+
+func verifyPersistedProvers(a *require.Assertions, w *Worker) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+ for k, v := range w.provers {
+ var proverFromDisk spProver
+ a.NoError(
+ w.db.Atomic(func(_ context.Context, tx *sql.Tx) error {
+ var err error
+ proverFromDisk, err = getProver(tx, k)
+ return err
+ }))
+ a.Equal(v.ProverPersistedFields, proverFromDisk.ProverPersistedFields)
+ }
+}
+
+func TestWorkerCacheAndDiskAfterRestart(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ const expectedStateProofs = proversCacheLength + 1
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, len(keys))
+ w := newTestWorkerOnDiskDb(t, s)
+ defer os.Remove(w.spDbFileName)
+ w.Start()
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+	// we break the loop into two parts since we don't want to add a state proof round (Round % 256 == 0)
+ for iter := 0; iter < expectedStateProofs-1; iter++ {
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval)
+ }
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval/2)
+
+ // at this point we expect the number of provers in memory to be equal to proversCacheLength
+ a.Equal(proversCacheLength, len(w.provers))
+ countDB, err := countProversInDB(w.db)
+ a.NoError(err)
+ a.Equal(expectedStateProofs, countDB)
+
+ threshold := onlineProversThreshold(&proto, basics.Round(512)) // 512 since no StateProofs are confirmed yet (512 is the first, commitment at 256)
+ var roundSigs map[basics.Round][]pendingSig
+ err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ roundSigs, err = getPendingSigs(tx, threshold, basics.Round(256+proto.StateProofInterval*expectedStateProofs), false)
+ return
+ })
+ a.NoError(err)
+ a.Equal(proversCacheLength, len(roundSigs)) // Number of broadcasted sigs should be the same as number of (online) cached provers.
+
+ // restart worker
+ w.Stop()
+	// we make sure that the worker will not be able to create a prover by removing the voters from the mock ledger
+ s.keysForVoters = []account.Participation{}
+
+ w.Start()
+ defer w.Stop()
+
+ a.Equal(proversCacheLength, len(w.provers))
+ countDB, err = countProversInDB(w.db)
+ a.NoError(err)
+ a.Equal(expectedStateProofs, countDB)
+
+ verifyPersistedProvers(a, w)
+}
+
+func TestWorkerInitOnlySignaturesInDatabase(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ const expectedStateProofs = proversCacheLength + 1
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, len(keys))
+ w := newTestWorkerOnDiskDb(t, s)
+ defer os.Remove(w.spDbFileName)
+ w.Start()
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+	// we break the loop into two parts since we don't want to add a state proof round (Round % 256 == 0)
+ for iter := 0; iter < expectedStateProofs-1; iter++ {
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval)
+ }
+ s.advanceRoundsWithoutStateProof(t, proto.StateProofInterval/2)
+
+	// at this point we expect the number of provers in memory to be bounded by proversCacheLength
+ a.Equal(proversCacheLength, len(w.provers))
+ countDB, err := countProversInDB(w.db)
+ a.NoError(err)
+ a.Equal(expectedStateProofs, countDB)
+
+ threshold := onlineProversThreshold(&proto, basics.Round(512)) // 512 since no StateProofs are confirmed yet (512 is the first, commitment at 256)
+ var roundSigs map[basics.Round][]pendingSig
+ err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ roundSigs, err = getPendingSigs(tx, threshold, basics.Round(256+proto.StateProofInterval*expectedStateProofs), false)
+ return
+ })
+ a.NoError(err)
+ a.Equal(proversCacheLength, len(roundSigs)) // Number of broadcasted sigs should be the same as number of (online) cached provers.
+
+ w.Stop()
+
+ accessor, err := db.MakeAccessor(w.spDbFileName, false, false)
+ a.NoError(err)
+ // we now remove all provers from the table. This will cause the worker to create the provers from the ledger.
+ a.NoError(accessor.Atomic(func(_ context.Context, tx *sql.Tx) error {
+ _, err := tx.Exec("DELETE from provers")
+ return err
+ }))
+ accessor.Close()
+
+ w.Start()
+ defer w.Stop()
+
+ a.Equal(proversCacheLength, len(w.provers))
+ countDB, err = countProversInDB(w.db)
+ a.NoError(err)
+ a.Equal(proversCacheLength, countDB)
+
+ verifyPersistedProvers(a, w)
+}
+
+func TestWorkerLoadsProverAndSignatureUponMsgRecv(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ lastRound := proto.StateProofInterval * 2
+ s, w, msg, _ := setBlocksAndMessage(t, basics.Round(lastRound))
+
+ stateproofMessage, err := GenerateStateProofMessage(w.ledger, basics.Round(lastRound))
+ require.NoError(t, err)
+
+ hashedStateproofMessage := stateproofMessage.Hash()
+ spRecords := s.StateProofKeys(basics.Round(proto.StateProofInterval * 2))
+ sig, err := spRecords[0].StateProofSecrets.SignBytes(hashedStateproofMessage[:])
+ require.NoError(t, err)
+
+ msg.Sig = sig
+ // Create the database
+ err = makeStateProofDB(w.db)
+ require.NoError(t, err)
+
+ msgBytes := protocol.Encode(&msg)
+	// add a signature so the prover gets loaded
+ reply := w.handleSigMessage(network.IncomingMessage{
+ Data: msgBytes,
+ })
+ require.Equal(t, network.OutgoingMessage{Action: network.Broadcast}, reply)
+
+	// we make sure that the worker will not be able to create a prover by removing the voters from the mock ledger
+ s.keysForVoters = []account.Participation{}
+
+ // removing the prover from memory will force the worker to load it from disk
+ w.provers = make(map[basics.Round]spProver)
+ _, exists := w.provers[msg.Round]
+ require.False(t, exists)
+ fwd, err := w.handleSig(msg, msg.SignerAddress)
+ // we expect the handler to ignore the signature since the prover and the old signature were loaded
+ require.Equal(t, network.Ignore, fwd)
+ require.NoError(t, err)
+ _, exists = w.provers[msg.Round]
+ require.True(t, exists)
+
+ // verify that provers can be loaded even if there are no signatures
+ w.provers = make(map[basics.Round]spProver)
+ _, exists = w.provers[msg.Round]
+ require.False(t, exists)
+ require.NoError(t, w.db.Atomic(func(_ context.Context, tx *sql.Tx) error {
+ _, err := tx.Exec("DELETE from sigs")
+ return err
+ }))
+ fwd, err = w.handleSig(msg, msg.SignerAddress)
+ require.Equal(t, network.Broadcast, fwd)
+ require.NoError(t, err)
+ _, exists = w.provers[msg.Round]
+ require.True(t, exists)
+
+	// with the prover removed from both disk and memory, prover creation fails (since the ledger also returns an error)
+ require.NoError(t, w.db.Atomic(func(_ context.Context, tx *sql.Tx) error {
+ _, err := tx.Exec("DELETE from provers")
+ return err
+ }))
+ w.provers = make(map[basics.Round]spProver)
+ _, err = w.handleSig(msg, msg.SignerAddress)
+ require.ErrorIs(t, err, errEmptyVoters)
+ _, exists = w.provers[msg.Round]
+ require.False(t, exists)
+}
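+
+ // Lookup order exercised above, sketched as pseudocode (the helper names are
+ // assumptions for illustration, not identifiers from this change):
+ //
+ //	prover, ok := w.provers[rnd] // 1. in-memory cache
+ //	if !ok {
+ //		prover, err := loadProverFromDB(rnd) // 2. persisted prover and its sigs
+ //		if err != nil {
+ //			prover, err = buildProverFromVoters(rnd) // 3. rebuild from the ledger
+ //		}
+ //	}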
+
+func TestWorkerCreatesProversOnCommit(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ _, s, w := createWorkerAndParticipants(t, protocol.ConsensusCurrentVersion, proto)
+ defer w.Stop()
+
+ // We remove the signer's keys to stop it from generating provers.
+ s.keys = []account.Participation{}
+
+	proverRound := basics.Round(proto.StateProofInterval * 2)
+
+ // We start on round 511, so the callback should be called when committing the next round.
+ s.advanceRoundsWithoutStateProof(t, 2)
+
+ var proverExists bool
+ err := w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+		proverExists, err = proverExistInDB(tx, proverRound)
+ return err
+ })
+ a.NoError(err)
+ a.False(proverExists)
+
+ // We leave one round uncommitted to be able to easily discern the stateProofNextRound.
+	s.mockCommit(proverRound)
+
+ proverExists = false
+ err = w.db.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+		proverExists, err = proverExistInDB(tx, proverRound)
+ return err
+ })
+ a.NoError(err)
+ a.True(proverExists)
+}
+
+func TestSignerUsesPersistedProverLatestProto(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ var keys []account.Participation
+ for i := 0; i < 2; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, 10)
+ w := newTestWorkerOnDiskDb(t, s)
+ defer os.Remove(w.spDbFileName)
+ w.Start()
+
+ // We remove the signer's keys to stop it from generating provers and signing.
+ prevKeys := s.keys
+ s.keys = []account.Participation{}
+
+ firstProverRound := basics.Round(proto.StateProofInterval * 2)
+
+ // We start on round 511, so the callback should be called on the next round.
+ s.advanceRoundsWithoutStateProof(t, 2)
+ s.mockCommit(firstProverRound)
+ s.waitForSignerAndBuilder(t)
+
+ a.False(s.isRoundSigned(a, w, firstProverRound))
+
+ // We restart the signing process.
+ s.keys = prevKeys
+ w.Stop()
+
+ w.Start()
+ defer w.Stop()
+
+	// We advance another round so we can wait for the signer, giving it time to finish signing.
+ s.advanceRoundsWithoutStateProof(t, 1)
+ s.waitForSignerAndBuilder(t)
+
+ a.True(s.isRoundSigned(a, w, firstProverRound))
+}
+
+func TestRegisterCommitListener(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := require.New(t)
+
+ const expectedStateProofs = 3
+ var keys []account.Participation
+ for i := 0; i < 10; i++ {
+ var parent basics.Address
+ crypto.RandBytes(parent[:])
+ p := newPartKey(t, parent)
+ defer p.Close()
+ keys = append(keys, p.Participation)
+ }
+
+ s := newWorkerStubs(t, keys, len(keys))
+ a.Nil(s.commitListener)
+
+ w := newTestWorker(t, s)
+ w.Start()
+
+ a.NotNil(s.commitListener)
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+	// we break the loop into two parts since we don't want to add a state proof round (Round % 256 == 0)
+ for iter := 0; iter < expectedStateProofs-1; iter++ {
+ s.advanceRoundsAndCreateStateProofs(t, proto.StateProofInterval)
+ }
+ s.advanceRoundsAndCreateStateProofs(t, proto.StateProofInterval/2)
+
+ w.Stop()
+
+ a.Nil(s.commitListener)
+}
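+
+ // The nil/non-nil transitions asserted above imply that Start registers the
+ // worker as a commit listener on the ledger and Stop unregisters it; the
+ // method names below are assumed for illustration:
+ //
+ //	func (w *Worker) Start() {
+ //		w.ledger.RegisterVotersCommitListener(w) // observe account commits
+ //		// ... spawn the signer/prover goroutines ...
+ //	}
+ //
+ //	func (w *Worker) Stop() {
+ //		w.ledger.UnregisterVotersCommitListener() // detach on shutdown
+ //		// ... wait for the goroutines to drain ...
+ //	}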
diff --git a/test/e2e-go/cli/goal/expect/goalAccountTest.exp b/test/e2e-go/cli/goal/expect/goalAccountTest.exp
index 234ef5cec..6f9563cbe 100644..100755
--- a/test/e2e-go/cli/goal/expect/goalAccountTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalAccountTest.exp
@@ -81,9 +81,9 @@ if { [catch {
set TEAL_PROGS_DIR "$TEST_DATA_DIR/../scripts/e2e_subs/tealprogs"
set APP_ID [::AlgorandGoal::AppCreate0 $PRIMARY_WALLET_NAME "" $NEW_ACCOUNT_ADDRESS ${TEAL_PROGS_DIR}/clear_program_state.teal $GLOBAL_BYTE_SLICES $LOCAL_BYTE_SLICES ${TEAL_PROGS_DIR}/clear_program_state.teal $TEST_PRIMARY_NODE_DIR]
- # expect app idx = 2 since a pre-recorded response is checked down the road
- if { $APP_ID != 2 } {
- ::AlgorandGoal::Abort "Expected app id to be 2 but got $APP_ID. Have you posted additional transactions? Only transfer txn is expected before app call txn"
+ # expect app idx = 1002 since a pre-recorded response is checked down the road
+ if { $APP_ID != 1002 } {
+ ::AlgorandGoal::Abort "Expected app id to be 1002 but got $APP_ID. Have you posted additional transactions? Only transfer txn is expected before app call txn"
}
# check JSON output to stdout
@@ -91,7 +91,7 @@ if { [catch {
\"addr\": \"47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU\",
\"algo\": 299000,
\"appp\": {
- \"2\": {
+ \"1002\": {
\"approv\": \"AiABASI=\",
\"clearp\": \"AiABASI=\",
\"gsch\": {
@@ -110,7 +110,7 @@ if { [catch {
}
# check msgpack output to a file with zero exit code
- set MSGP_EXPECTED_BASE64 "hKRhZGRyxCDn8PhNBoEd+fMcjYeLEVX0Zx1RoYXCAJCGZ/RJWHBooaRhbGdvzgAEj/ikYXBwcIECg6ZhcHByb3bEBQIgAQEipmNsZWFycMQFAiABASKkZ3NjaIGjbmJzAaR0c2NogaNuYnMB"
+ set MSGP_EXPECTED_BASE64 "hKRhZGRyxCDn8PhNBoEd+fMcjYeLEVX0Zx1RoYXCAJCGZ/RJWHBooaRhbGdvzgAEj/ikYXBwcIHNA+qDpmFwcHJvdsQFAiABASKmY2xlYXJwxAUCIAEBIqRnc2NogaNuYnMBpHRzY2iBo25icwE="
set MSGP_EXPECTED [ exec echo -n $MSGP_EXPECTED_BASE64 | base64 --decode ]
set BALREC_FILE "$TEST_ROOT_DIR/brec.msgp"
spawn goal account dump -a $NEW_ACCOUNT_ADDRESS -o $BALREC_FILE --datadir $TEST_PRIMARY_NODE_DIR
diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
index d8ed3f4e4..4728f445d 100644
--- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
+++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
@@ -56,7 +56,7 @@ proc ::AlgorandGoal::Abort { ERROR } {
if { $::tcl_platform(os) == "Darwin" } {
exec >@stdout 2>@stderr top -l 1
} elseif { $::tcl_platform(os) == "Linux" } {
- exec >@stdout 2>@stderr top -n 1
+ exec >@stdout 2>@stderr top -b -n 1
} else {
# no logging for other platforms
}
@@ -67,37 +67,37 @@ proc ::AlgorandGoal::Abort { ERROR } {
set NODE_DATA_DIR $::GLOBAL_TEST_ROOT_DIR/Primary
if { [file exists $NODE_DATA_DIR] } {
set outLog [exec cat $NODE_DATA_DIR/algod-out.log]
- puts "\n$NODE_DATA_DIR/algod-out.log:\r\n$outLog"
+ puts "\n$NODE_DATA_DIR/algod-out.log:\n$outLog"
set errLog [exec cat $NODE_DATA_DIR/algod-err.log]
- puts "\n$NODE_DATA_DIR/algod-err.log:\r\n$errLog"
+ puts "\n$NODE_DATA_DIR/algod-err.log:\n$errLog"
set nodeLog [exec -- tail -n 50 $NODE_DATA_DIR/node.log]
- puts "\n$NODE_DATA_DIR/node.log:\r\n$nodeLog"
+ puts "\n$NODE_DATA_DIR/node.log:\n$nodeLog"
set LOGS_COLLECTED 1
set outLog [exec cat $NODE_DATA_DIR/kmd-v0.5/kmd-out.log]
- puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-out.log:\r\n$outLog"
+ puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-out.log:\n$outLog"
set errLog [exec cat $NODE_DATA_DIR/kmd-v0.5/kmd-err.log]
- puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-err.log:\r\n$errLog"
+ puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-err.log:\n$errLog"
set kmdLog [exec -- tail -n 50 $NODE_DATA_DIR/kmd-v0.5/kmd.log]
- puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd.log:\r\n$kmdLog"
+ puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd.log:\n$kmdLog"
}
set NODE_DATA_DIR $::GLOBAL_TEST_ROOT_DIR/Node
puts "Node path $NODE_DATA_DIR"
if { [file exists $NODE_DATA_DIR] } {
set outLog [exec cat $NODE_DATA_DIR/algod-out.log]
- puts "\n$NODE_DATA_DIR/algod-out.log:\r\n$outLog"
+ puts "\n$NODE_DATA_DIR/algod-out.log:\n$outLog"
set errLog [exec cat $NODE_DATA_DIR/algod-err.log]
- puts "\n$NODE_DATA_DIR/algod-err.log:\r\n$errLog"
+ puts "\n$NODE_DATA_DIR/algod-err.log:\n$errLog"
set nodeLog [exec -- tail -n 50 $NODE_DATA_DIR/node.log]
- puts "\n$NODE_DATA_DIR/node.log:\r\n$nodeLog"
+ puts "\n$NODE_DATA_DIR/node.log:\n$nodeLog"
set LOGS_COLLECTED 1
set outLog [exec cat $NODE_DATA_DIR/kmd-v0.5/kmd-out.log]
- puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-out.log:\r\n$outLog"
+ puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-out.log:\n$outLog"
set errLog [exec cat $NODE_DATA_DIR/kmd-v0.5/kmd-err.log]
- puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-err.log:\r\n$errLog"
+ puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd-err.log:\n$errLog"
set kmdLog [exec -- tail -n 50 $NODE_DATA_DIR/kmd-v0.5/kmd.log]
- puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd.log:\r\n$kmdLog"
+ puts "\n$NODE_DATA_DIR/kmd-v0.5/kmd.log:\n$kmdLog"
}
}
@@ -109,18 +109,18 @@ proc ::AlgorandGoal::Abort { ERROR } {
if { $LOGS_COLLECTED == 0 } {
log_user 1
set outLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-out.log]
- puts "\n$::GLOBAL_TEST_ALGO_DIR/algod-out.log:\r\n$outLog"
+ puts "\n$::GLOBAL_TEST_ALGO_DIR/algod-out.log:\n$outLog"
set errLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-err.log]
- puts "\n$::GLOBAL_TEST_ALGO_DIR/algod-err.log:\r\n$errLog"
+ puts "\n$::GLOBAL_TEST_ALGO_DIR/algod-err.log:\n$errLog"
set nodeLog [exec -- tail -n 50 $::GLOBAL_TEST_ALGO_DIR/node.log]
- puts "\n$::GLOBAL_TEST_ALGO_DIR/node.log:\r\n$nodeLog"
+ puts "\n$::GLOBAL_TEST_ALGO_DIR/node.log:\n$nodeLog"
set outLog [exec cat $GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd-out.log]
- puts "\n$GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd-out.log:\r\n$outLog"
+ puts "\n$GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd-out.log:\n$outLog"
set errLog [exec cat $GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd-err.log]
- puts "\n$GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd-err.log:\r\n$errLog"
+ puts "\n$GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd-err.log:\n$errLog"
set kmdLog [exec -- tail -n 50 $GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd.log]
- puts "\n$GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd.log:\r\n$kmdLog"
+ puts "\n$GLOBAL_TEST_ALGO_DIR/kmd-v0.5/kmd.log:\n$kmdLog"
}
}
@@ -859,7 +859,7 @@ proc ::AlgorandGoal::VerifyMultisigInfoForOneOfTwoMultisig { MULTISIG_ADDRESS AD
spawn goal account multisig info --address $MULTISIG_ADDRESS -d $TEST_PRIMARY_NODE_DIR -w $WALLET_NAME
expect {
timeout { ::AlgorandGoal::Abort "Timed out querying info about multisig account $MULTISIG_ADDRESS" }
- -re {Version: (\d+)\r\nThreshold: (\d+)\r\nPublic keys:\r\n ([a-zA-Z0-9]+)\r\n ([a-zA-Z0-9]+)\r\n} {
+ -re {Version: (\d+)\s+Threshold: (\d+)\s+Public keys:\s+([a-zA-Z0-9]+)\s+([a-zA-Z0-9]+)\s+} {
set VERSION $expect_out(1,string);
set THRESHOLD $expect_out(2,string);
set ADDRESS_RESPONSE_1 $expect_out(3,string);
diff --git a/test/e2e-go/features/accountPerf/sixMillion_test.go b/test/e2e-go/features/accountPerf/sixMillion_test.go
index cc2095dfb..7548b259b 100644
--- a/test/e2e-go/features/accountPerf/sixMillion_test.go
+++ b/test/e2e-go/features/accountPerf/sixMillion_test.go
@@ -680,12 +680,13 @@ func scenarioB(
require.Equal(t, numberOfAssets, info.TotalCreatedAssets)
log.Infof("Verifying assets...")
- // Verify the assets are transfered here
+ // Verify the assets are transferred here
tAssetAmt := uint64(0)
counter = 0
- // this loop iterates over all the range of potentail assets, tries to confirm all of them.
+	// this loop iterates over the entire range of potential assets and tries to confirm all of them.
// many of these are expected to be non-existing.
- for aid := uint64(0); counter < numberOfAssets && aid < 2*numberOfAssets; aid++ {
+ startIdx := uint64(1000) // tx counter starts from 1000
+ for aid := startIdx; counter < numberOfAssets && aid < 2*startIdx*numberOfAssets; aid++ {
select {
case <-stopChan:
require.False(t, true, "Test interrupted")
diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go
index 435393f70..7663e54b3 100644
--- a/test/e2e-go/features/catchup/basicCatchup_test.go
+++ b/test/e2e-go/features/catchup/basicCatchup_test.go
@@ -35,9 +35,6 @@ func TestBasicCatchup(t *testing.T) {
partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
- if testing.Short() {
- t.Skip()
- }
t.Parallel()
a := require.New(fixtures.SynchronizedTest(t))
@@ -48,12 +45,12 @@ func TestBasicCatchup(t *testing.T) {
var fixture fixtures.RestClientFixture
// Give the second node (which starts up last) all the stake so that its proposal always has better credentials,
- // and so that its proposal isn't dropped. Otherwise the test burns 17s to recover. We don't care about stake
+ // and so that its proposal isn't dropped. Otherwise, the test burns 17s to recover. We don't care about stake
// distribution for catchup so this is fine.
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes100Second.json"))
defer fixture.Shutdown()
- // Get 2nd node so we wait until we know they're at target block
+ // Get 2nd node, so we wait until we know they're at target block
nc, err := fixture.GetNodeController("Node")
a.NoError(err)
@@ -75,6 +72,24 @@ func TestBasicCatchup(t *testing.T) {
// Now, catch up
err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound)
a.NoError(err)
+
+ cloneNC := fixture.GetNodeControllerForDataDir(cloneDataDir)
+ cloneRestClient := fixture.GetAlgodClientForController(cloneNC)
+
+	// an immediate call to ready will error while sync time != 0
+ a.Error(cloneRestClient.ReadyCheck())
+
+ for {
+ status, err := cloneRestClient.Status()
+ a.NoError(err)
+
+ if status.LastRound < 10 {
+ time.Sleep(250 * time.Millisecond)
+ continue
+ }
+ a.NoError(cloneRestClient.ReadyCheck())
+ break
+ }
}
// TestCatchupOverGossip tests catchup across network versions
diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go
index e8de54e66..5d4e9b194 100644
--- a/test/e2e-go/features/catchup/catchpointCatchup_test.go
+++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go
@@ -26,11 +26,10 @@ import (
"testing"
"time"
- "github.com/algorand/go-deadlock"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
- algodclient "github.com/algorand/go-algorand/daemon/algod/api/client"
+ "github.com/algorand/go-algorand/daemon/algod/api/client"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger/ledgercore"
@@ -39,13 +38,122 @@ import (
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-deadlock"
)
+const basicTestCatchpointInterval = 4
+
+func waitForCatchpointGeneration(fixture *fixtures.RestClientFixture, client client.RestClient, catchpointRound basics.Round) (string, error) {
+ err := fixture.ClientWaitForRoundWithTimeout(client, uint64(catchpointRound+1))
+ if err != nil {
+ return "", err
+ }
+
+ var status model.NodeStatusResponse
+ timer := time.NewTimer(10 * time.Second)
+ for {
+ status, err = client.Status()
+ if err != nil {
+ return "", err
+ }
+
+ var round basics.Round
+ if status.LastCatchpoint != nil && len(*status.LastCatchpoint) > 0 {
+ round, _, err = ledgercore.ParseCatchpointLabel(*status.LastCatchpoint)
+ if err != nil {
+ return "", err
+ }
+ if round >= catchpointRound {
+ break
+ }
+ }
+ select {
+ case <-timer.C:
+ return "", fmt.Errorf("timeout while waiting for catchpoint, target: %d, got %d", catchpointRound, round)
+ default:
+ time.Sleep(250 * time.Millisecond)
+ }
+ }
+
+ return *status.LastCatchpoint, nil
+}
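+
+ // Sketch of a typical call site, assuming the primary node generates the
+ // catchpoints (names mirror the helpers defined around this function):
+ //
+ //	catchpointRound := getFirstCatchpointRound(&consensusParams)
+ //	label, err := waitForCatchpointGeneration(&fixture, primaryRestClient, catchpointRound)
+ //	a.NoError(err)
+ //	// the label ("<round>#<digest>") is then handed to the catching-up node,
+ //	// e.g. via `goal node catchup <label>`.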
+
+func denyRoundRequestsWebProxy(a *require.Assertions, listeningAddress string, round basics.Round) *fixtures.WebProxy {
+ log := logging.NewLogger()
+ log.SetLevel(logging.Info)
+
+ wp, err := fixtures.MakeWebProxy(listeningAddress, log, func(response http.ResponseWriter, request *http.Request, next http.HandlerFunc) {
+ // prevent requests for the given block to go through.
+ if request.URL.String() == fmt.Sprintf("/v1/test-v1/block/%d", round) {
+ response.WriteHeader(http.StatusBadRequest)
+ response.Write([]byte(fmt.Sprintf("webProxy prevents block %d from serving", round)))
+ return
+ }
+ next(response, request)
+ })
+ a.NoError(err)
+ log.Infof("web proxy listens at %s\n", wp.GetListenAddress())
+ return wp
+}
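+
+ // The proxy sits between the catching-up node and the primary: a regular
+ // block-by-block catchup would stall on the restricted round, so only a fast
+ // catchup past it can succeed. Sketched wiring (the Close method and the peer
+ // address plumbing are assumptions here):
+ //
+ //	wp := denyRoundRequestsWebProxy(a, primaryNodeAddress, 2)
+ //	defer wp.Close()
+ //	// point the catching-up node at wp.GetListenAddress() instead of the primary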
+
+func getFirstCatchpointRound(consensusParams *config.ConsensusParams) basics.Round {
+ // fast catchup downloads some blocks back from catchpoint round - CatchpointLookback
+ expectedBlocksToDownload := consensusParams.MaxTxnLife + consensusParams.DeeperBlockHeaderHistory
+	const restrictedBlockRound = 2 // block whose download is rejected, to ensure fast catchup (not regular catchup) is running
+	// calculate the target round: the first catchpoint round whose fast-catchup
+	// download window starts after the restricted block number
+ minRound := restrictedBlockRound + consensusParams.CatchpointLookback
+ return basics.Round(((expectedBlocksToDownload+minRound)/basicTestCatchpointInterval + 1) * basicTestCatchpointInterval)
+}
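+
+ // Worked example with the consensus tweaks applied below (MaxTxnLife = 13,
+ // CatchpointLookback = 8) and assuming DeeperBlockHeaderHistory = 1:
+ // expectedBlocksToDownload = 13+1 = 14, minRound = 2+8 = 10, so the first
+ // usable catchpoint round is ((14+10)/4 + 1) * 4 = 28.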
+
+func applyCatchpointConsensusChanges(consensusParams *config.ConsensusParams) {
+ // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback
+ // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md
+ consensusParams.SeedLookback = 2
+ consensusParams.SeedRefreshInterval = 2
+ consensusParams.MaxBalLookback = 2 * consensusParams.SeedLookback * consensusParams.SeedRefreshInterval // 8
+ consensusParams.MaxTxnLife = 13
+ consensusParams.CatchpointLookback = consensusParams.MaxBalLookback
+ consensusParams.EnableCatchpointsWithSPContexts = true
+ if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" {
+ // amd64 and arm64 platforms are generally quite capable, so accelerate the round times to make the test run faster.
+ consensusParams.AgreementFilterTimeoutPeriod0 = 1 * time.Second
+ consensusParams.AgreementFilterTimeout = 1 * time.Second
+ }
+ consensusParams.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+}
+
+func configureCatchpointGeneration(a *require.Assertions, nodeController *nodecontrol.NodeController) {
+ cfg, err := config.LoadConfigFromDisk(nodeController.GetDataDir())
+ a.NoError(err)
+
+ cfg.CatchpointInterval = basicTestCatchpointInterval
+ cfg.MaxAcctLookback = 2
+ err = cfg.SaveToDisk(nodeController.GetDataDir())
+ a.NoError(err)
+}
+
+func configureCatchpointUsage(a *require.Assertions, nodeController *nodecontrol.NodeController) {
+ cfg, err := config.LoadConfigFromDisk(nodeController.GetDataDir())
+ a.NoError(err)
+
+ cfg.MaxAcctLookback = 2
+ cfg.Archival = false
+ cfg.CatchpointInterval = 0
+ cfg.NetAddress = ""
+ cfg.EnableLedgerService = false
+ cfg.EnableBlockService = false
+ cfg.BaseLoggerDebugLevel = uint32(logging.Debug)
+ cfg.CatchupBlockValidateMode = 12
+ err = cfg.SaveToDisk(nodeController.GetDataDir())
+ a.NoError(err)
+}
+
type nodeExitErrorCollector struct {
errors []error
messages []string
mu deadlock.Mutex
- t fixtures.TestingTB
+ a *require.Assertions
}
func (ec *nodeExitErrorCollector) nodeExitWithError(nc *nodecontrol.NodeController, err error) {
@@ -78,106 +186,47 @@ func (ec *nodeExitErrorCollector) Print() {
ec.mu.Lock()
defer ec.mu.Unlock()
for i, err := range ec.errors {
- require.NoError(ec.t, err, ec.messages[i])
+ ec.a.NoError(err, ec.messages[i])
}
}
-// awaitCatchpointCreation attempts catchpoint retrieval with retries when the catchpoint is not yet available.
-func awaitCatchpointCreation(client algodclient.RestClient, fixture *fixtures.RestClientFixture, roundWaitCount uint8) (model.NodeStatusResponse, error) {
- s, err := client.Status()
- if err != nil {
- return model.NodeStatusResponse{}, err
- }
-
- if len(*s.LastCatchpoint) > 0 {
- return s, nil
+func startCatchpointGeneratingNode(a *require.Assertions, fixture *fixtures.RestClientFixture, nodeName string) (
+ nodecontrol.NodeController, client.RestClient, *nodeExitErrorCollector) {
+ nodeController, err := fixture.GetNodeController(nodeName)
+ a.NoError(err)
- }
+ configureCatchpointGeneration(a, &nodeController)
- if roundWaitCount-1 > 0 {
- err = fixture.ClientWaitForRound(client, s.LastRound+1, 10*time.Second)
- if err != nil {
- return model.NodeStatusResponse{}, err
- }
+ errorsCollector := nodeExitErrorCollector{a: a}
+ _, err = nodeController.StartAlgod(nodecontrol.AlgodStartArgs{
+ PeerAddress: "",
+ ListenIP: "",
+ RedirectOutput: true,
+ RunUnderHost: false,
+ TelemetryOverride: "",
+ ExitErrorCallback: errorsCollector.nodeExitWithError,
+ })
+ a.NoError(err)
- return awaitCatchpointCreation(client, fixture, roundWaitCount-1)
- }
+ restClient := fixture.GetAlgodClientForController(nodeController)
+ // We don't want to start using the node without it being properly initialized.
+ err = fixture.ClientWaitForRoundWithTimeout(restClient, 1)
+ a.NoError(err)
- return model.NodeStatusResponse{}, fmt.Errorf("No catchpoint exists")
+ return nodeController, restClient, &errorsCollector
}
-func TestBasicCatchpointCatchup(t *testing.T) {
- partitiontest.PartitionTest(t)
- defer fixtures.ShutdownSynchronizedTest(t)
-
- if testing.Short() {
- t.Skip()
- }
- a := require.New(fixtures.SynchronizedTest(t))
- log := logging.TestingLog(t)
-
- // Overview of this test:
- // Start a two-node network (primary has 100%, secondary has 0%)
- // Nodes are having a consensus allowing balances history of 8 rounds and transaction history of 13 rounds.
- // Let it run for 21 rounds.
- // create a web proxy, and connect it to the primary node, blocking all requests for round #2. ( and allowing everything else )
- // start a secondary node, and instuct it to catchpoint catchup from the proxy. ( which would be for round 20 )
- // wait until the clone node cought up, skipping the "impossible" hole of round #2.
-
- consensus := make(config.ConsensusProtocols)
- const consensusCatchpointCatchupTestProtocol = protocol.ConsensusVersion("catchpointtestingprotocol")
- catchpointCatchupProtocol := config.Consensus[protocol.ConsensusCurrentVersion]
- catchpointCatchupProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback
- // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md
- catchpointCatchupProtocol.SeedLookback = 2
- catchpointCatchupProtocol.SeedRefreshInterval = 2
- catchpointCatchupProtocol.MaxBalLookback = 2 * catchpointCatchupProtocol.SeedLookback * catchpointCatchupProtocol.SeedRefreshInterval // 8
- catchpointCatchupProtocol.MaxTxnLife = 13
- catchpointCatchupProtocol.CatchpointLookback = catchpointCatchupProtocol.MaxBalLookback
- catchpointCatchupProtocol.EnableOnlineAccountCatchpoints = true
-
- if runtime.GOARCH == "amd64" {
- // amd64 platforms are generally quite capable, so accelerate the round times to make the test run faster.
- catchpointCatchupProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second
- catchpointCatchupProtocol.AgreementFilterTimeout = 1 * time.Second
- }
-
- consensus[consensusCatchpointCatchupTestProtocol] = catchpointCatchupProtocol
-
- var fixture fixtures.RestClientFixture
- fixture.SetConsensus(consensus)
-
- errorsCollector := nodeExitErrorCollector{t: fixtures.SynchronizedTest(t)}
- defer errorsCollector.Print()
-
- fixture.SetupNoStart(t, filepath.Join("nettemplates", "CatchpointCatchupTestNetwork.json"))
-
- // Get primary node
- primaryNode, err := fixture.GetNodeController("Primary")
- a.NoError(err)
- // Get secondary node
- secondNode, err := fixture.GetNodeController("Node")
+func startCatchpointUsingNode(a *require.Assertions, fixture *fixtures.RestClientFixture, nodeName string, peerAddress string) (
+ nodecontrol.NodeController, client.RestClient, *fixtures.WebProxy, *nodeExitErrorCollector) {
+ nodeController, err := fixture.GetNodeController(nodeName)
a.NoError(err)
- // prepare it's configuration file to set it to generate a catchpoint every 4 rounds.
- cfg, err := config.LoadConfigFromDisk(primaryNode.GetDataDir())
- a.NoError(err)
- const catchpointInterval = 4
- cfg.CatchpointInterval = catchpointInterval
- cfg.MaxAcctLookback = 2
- cfg.SaveToDisk(primaryNode.GetDataDir())
- cfg.Archival = false
- cfg.CatchpointInterval = 0
- cfg.NetAddress = ""
- cfg.EnableLedgerService = false
- cfg.EnableBlockService = false
- cfg.BaseLoggerDebugLevel = uint32(logging.Debug)
- cfg.SaveToDisk(secondNode.GetDataDir())
+ configureCatchpointUsage(a, &nodeController)
- // start the primary node
- _, err = primaryNode.StartAlgod(nodecontrol.AlgodStartArgs{
- PeerAddress: "",
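+ // Route the node's peer traffic through a proxy that denies requests for block 2,
+ // so regular catchup cannot bridge the gap and fast catchup must be used.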
+ wp := denyRoundRequestsWebProxy(a, peerAddress, 2)
+ errorsCollector := nodeExitErrorCollector{a: a}
+ _, err = nodeController.StartAlgod(nodecontrol.AlgodStartArgs{
+ PeerAddress: wp.GetListenAddress(),
ListenIP: "",
RedirectOutput: true,
RunUnderHost: false,
@@ -185,49 +234,23 @@ func TestBasicCatchpointCatchup(t *testing.T) {
ExitErrorCallback: errorsCollector.nodeExitWithError,
})
a.NoError(err)
- defer primaryNode.StopAlgod()
- // Let the network make some progress
- currentRound := uint64(1)
- // fast catchup downloads some blocks back from catchpoint round - CatchpointLookback
- expectedBlocksToDownload := catchpointCatchupProtocol.MaxTxnLife + catchpointCatchupProtocol.DeeperBlockHeaderHistory
- const restrictedBlockRound = 2 // block number that is rejected to be downloaded to ensure fast catchup and not regular catchup is running
- // calculate the target round: this is the next round after catchpoint
- // that is greater than expectedBlocksToDownload before the restrictedBlock block number
- minRound := restrictedBlockRound + catchpointCatchupProtocol.CatchpointLookback
- targetCatchpointRound := (basics.Round(expectedBlocksToDownload+minRound)/catchpointInterval + 1) * catchpointInterval
- targetRound := uint64(targetCatchpointRound) + 1
- primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode)
- log.Infof("Building ledger history..")
- for {
- err = fixture.ClientWaitForRound(primaryNodeRestClient, currentRound, 45*time.Second)
- a.NoError(err)
- if targetRound <= currentRound {
- break
- }
- currentRound++
- }
- log.Infof("done building!\n")
-
- primaryListeningAddress, err := primaryNode.GetListeningAddress()
+ restClient := fixture.GetAlgodClientForController(nodeController)
+ // We don't want to start using the node without it being properly initialized.
+ err = fixture.ClientWaitForRoundWithTimeout(restClient, 1)
a.NoError(err)
- wp, err := fixtures.MakeWebProxy(primaryListeningAddress, log, func(response http.ResponseWriter, request *http.Request, next http.HandlerFunc) {
- // prevent requests for block #2 to go through.
- if request.URL.String() == "/v1/test-v1/block/2" {
- response.WriteHeader(http.StatusBadRequest)
- response.Write([]byte("webProxy prevents block 2 from serving"))
- return
- }
- next(response, request)
- })
+ return nodeController, restClient, wp, &errorsCollector
+}
+
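+// startCatchpointNormalNode starts a node with an unmodified configuration,
+// peered directly with the given address.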
+func startCatchpointNormalNode(a *require.Assertions, fixture *fixtures.RestClientFixture, nodeName string, peerAddress string) (
+ nodecontrol.NodeController, client.RestClient, *nodeExitErrorCollector) {
+ nodeController, err := fixture.GetNodeController(nodeName)
a.NoError(err)
- defer wp.Close()
- log.Infof("web proxy listens at %s\n", wp.GetListenAddress())
- // start the second node
- _, err = secondNode.StartAlgod(nodecontrol.AlgodStartArgs{
- PeerAddress: wp.GetListenAddress(),
+ errorsCollector := nodeExitErrorCollector{a: a}
+ _, err = nodeController.StartAlgod(nodecontrol.AlgodStartArgs{
+ PeerAddress: peerAddress,
ListenIP: "",
RedirectOutput: true,
RunUnderHost: false,
@@ -235,67 +258,69 @@ func TestBasicCatchpointCatchup(t *testing.T) {
ExitErrorCallback: errorsCollector.nodeExitWithError,
})
a.NoError(err)
- defer secondNode.StopAlgod()
- // wait until node is caught up.
- secondNodeRestClient := fixture.GetAlgodClientForController(secondNode)
+ restClient := fixture.GetAlgodClientForController(nodeController)
+ // We don't want to start using the node without it being properly initialized.
+ err = fixture.ClientWaitForRoundWithTimeout(restClient, 1)
+ a.NoError(err)
- currentRound = uint64(1)
- secondNodeTargetRound := uint64(1)
- log.Infof("Second node catching up to round 1")
- for {
- err = fixture.ClientWaitForRound(secondNodeRestClient, currentRound, 10*time.Second)
- a.NoError(err)
- if secondNodeTargetRound <= currentRound {
- break
- }
- currentRound++
+ return nodeController, restClient, &errorsCollector
+}
- }
- log.Infof(" - done catching up!\n")
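+// getFixture registers the supplied consensus parameters under a test-only
+// protocol version and returns a fixture configured to use them.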
+func getFixture(consensusParams *config.ConsensusParams) *fixtures.RestClientFixture {
+ consensus := make(config.ConsensusProtocols)
+ const consensusCatchpointCatchupTestProtocol = protocol.ConsensusVersion("catchpointtestingprotocol")
+ consensus[consensusCatchpointCatchupTestProtocol] = *consensusParams
- // ensure the catchpoint is created for targetCatchpointRound
- var status model.NodeStatusResponse
- timer := time.NewTimer(10 * time.Second)
-outer:
- for {
- status, err = primaryNodeRestClient.Status()
- a.NoError(err)
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(consensus)
+ return &fixture
+}
- var round basics.Round
- if status.LastCatchpoint != nil && len(*status.LastCatchpoint) > 0 {
- round, _, err = ledgercore.ParseCatchpointLabel(*status.LastCatchpoint)
- a.NoError(err)
- if round >= targetCatchpointRound {
- break
- }
- }
- select {
- case <-timer.C:
- a.Failf("timeout waiting a catchpoint", "target: %d, got %d", targetCatchpointRound, round)
- break outer
- default:
- time.Sleep(250 * time.Millisecond)
- }
+func TestBasicCatchpointCatchup(t *testing.T) {
+ // Overview of this test:
+ // Start a two-node network (the primary node has 100% of the stake, the using node has 0%).
+ // Create a web proxy, have the using node use it as a peer, and block all requests for round #2 (allowing everything else).
+ // Let it run until the first usable catchpoint, as computed in getFirstCatchpointRound, is generated.
+ // Instruct the using node to catchpoint catchup from the proxy.
+ // Wait until the using node is caught up to catchpointRound+1, skipping the "impossible" hole of round #2 and
+ // participating in consensus.
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ if testing.Short() {
+ t.Skip()
}
- log.Infof("primary node latest catchpoint - %s!\n", *status.LastCatchpoint)
- _, err = secondNodeRestClient.Catchup(*status.LastCatchpoint)
+ consensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ applyCatchpointConsensusChanges(&consensusParams)
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ fixture := getFixture(&consensusParams)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "CatchpointCatchupTestNetwork.json"))
+
+ primaryNode, primaryNodeRestClient, primaryErrorsCollector := startCatchpointGeneratingNode(a, fixture, "Primary")
+ defer primaryErrorsCollector.Print()
+ defer primaryNode.StopAlgod()
+
+ primaryNodeAddr, err := primaryNode.GetListeningAddress()
a.NoError(err)
- currentRound = status.LastRound
- a.LessOrEqual(targetRound, currentRound)
- fixtureTargetRound := targetRound + 1
- log.Infof("Second node catching up to round %v", currentRound)
- for {
- err = fixture.ClientWaitForRound(secondNodeRestClient, currentRound, 10*time.Second)
- a.NoError(err)
- if fixtureTargetRound <= currentRound {
- break
- }
- currentRound++
- }
- log.Infof("done catching up!\n")
+ usingNode, usingNodeRestClient, wp, usingNodeErrorsCollector := startCatchpointUsingNode(a, fixture, "Node", primaryNodeAddr)
+ defer usingNodeErrorsCollector.Print()
+ defer wp.Close()
+ defer usingNode.StopAlgod()
+
+ targetCatchpointRound := getFirstCatchpointRound(&consensusParams)
+
+ catchpointLabel, err := waitForCatchpointGeneration(fixture, primaryNodeRestClient, targetCatchpointRound)
+ a.NoError(err)
+
+ _, err = usingNodeRestClient.Catchup(catchpointLabel)
+ a.NoError(err)
+
+ err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound+1))
+ a.NoError(err)
}
func TestCatchpointLabelGeneration(t *testing.T) {
@@ -324,28 +349,13 @@ func TestCatchpointLabelGeneration(t *testing.T) {
consensus := make(config.ConsensusProtocols)
const consensusCatchpointCatchupTestProtocol = protocol.ConsensusVersion("catchpointtestingprotocol")
catchpointCatchupProtocol := config.Consensus[protocol.ConsensusCurrentVersion]
- catchpointCatchupProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback
- // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md
- catchpointCatchupProtocol.SeedLookback = 2
- catchpointCatchupProtocol.SeedRefreshInterval = 2
- catchpointCatchupProtocol.MaxBalLookback = 2 * catchpointCatchupProtocol.SeedLookback * catchpointCatchupProtocol.SeedRefreshInterval // 8
- catchpointCatchupProtocol.MaxTxnLife = 13
- catchpointCatchupProtocol.CatchpointLookback = catchpointCatchupProtocol.MaxBalLookback
- catchpointCatchupProtocol.EnableOnlineAccountCatchpoints = true
-
- if runtime.GOARCH == "amd64" {
- // amd64 platforms are generally quite capable, so accelerate the round times to make the test run faster.
- catchpointCatchupProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second
- catchpointCatchupProtocol.AgreementFilterTimeout = 1 * time.Second
- }
-
+ applyCatchpointConsensusChanges(&catchpointCatchupProtocol)
consensus[consensusCatchpointCatchupTestProtocol] = catchpointCatchupProtocol
var fixture fixtures.RestClientFixture
fixture.SetConsensus(consensus)
- errorsCollector := nodeExitErrorCollector{t: fixtures.SynchronizedTest(t)}
+ errorsCollector := nodeExitErrorCollector{a: a}
defer errorsCollector.Print()
fixture.SetupNoStart(t, filepath.Join("nettemplates", "CatchpointCatchupTestNetwork.json"))
@@ -417,20 +427,8 @@ func TestNodeTxHandlerRestart(t *testing.T) {
consensus := make(config.ConsensusProtocols)
protoVersion := protocol.ConsensusCurrentVersion
catchpointCatchupProtocol := config.Consensus[protoVersion]
- catchpointCatchupProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback
- // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md
- catchpointCatchupProtocol.SeedLookback = 2
- catchpointCatchupProtocol.SeedRefreshInterval = 2
- catchpointCatchupProtocol.MaxBalLookback = 2 * catchpointCatchupProtocol.SeedLookback * catchpointCatchupProtocol.SeedRefreshInterval // 8
- catchpointCatchupProtocol.CatchpointLookback = catchpointCatchupProtocol.MaxBalLookback
- catchpointCatchupProtocol.EnableOnlineAccountCatchpoints = true
+ applyCatchpointConsensusChanges(&catchpointCatchupProtocol)
catchpointCatchupProtocol.StateProofInterval = 0
- if runtime.GOOS == "darwin" || runtime.GOARCH == "amd64" {
- // amd64/macos platforms are generally quite capable, so accelerate the round times to make the test run faster.
- catchpointCatchupProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second
- catchpointCatchupProtocol.AgreementFilterTimeout = 1 * time.Second
- }
consensus[protoVersion] = catchpointCatchupProtocol
var fixture fixtures.RestClientFixture
@@ -537,6 +535,161 @@ outer:
a.NoError(err)
}
+// TestReadyEndpoint starts a two-node network (derived mainly from TestNodeTxHandlerRestart),
+// lets the primary node have the majority of the stake,
+// waits until a catchpoint is created,
+// has the primary node catch up against the catchpoint and confirms the ready endpoint returns 503,
+// then waits for the primary node to reach the target round and confirms the ready endpoint returns 200.
+func TestReadyEndpoint(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ consensus := make(config.ConsensusProtocols)
+ protoVersion := protocol.ConsensusCurrentVersion
+ catchpointCatchupProtocol := config.Consensus[protoVersion]
+ applyCatchpointConsensusChanges(&catchpointCatchupProtocol)
+ catchpointCatchupProtocol.StateProofInterval = 0
+ consensus[protoVersion] = catchpointCatchupProtocol
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(consensus)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes50EachWithRelay.json"))
+
+ // Get primary node
+ primaryNode, err := fixture.GetNodeController("Node1")
+ a.NoError(err)
+ // Get secondary node
+ secondNode, err := fixture.GetNodeController("Node2")
+ a.NoError(err)
+ // Get the relay
+ relayNode, err := fixture.GetNodeController("Relay")
+ a.NoError(err)
+
+ // prepare its configuration file to set it to generate a catchpoint every 16 rounds.
+ cfg, err := config.LoadConfigFromDisk(primaryNode.GetDataDir())
+ a.NoError(err)
+ const catchpointInterval = 16
+ cfg.CatchpointInterval = catchpointInterval
+ cfg.CatchpointTracking = 2
+ cfg.MaxAcctLookback = 2
+ cfg.Archival = false
+ cfg.TxSyncIntervalSeconds = 200000 // disable txSync
+
+ err = cfg.SaveToDisk(primaryNode.GetDataDir())
+ a.NoError(err)
+ err = cfg.SaveToDisk(secondNode.GetDataDir())
+ a.NoError(err)
+
+ cfg, err = config.LoadConfigFromDisk(relayNode.GetDataDir())
+ a.NoError(err)
+ cfg.TxSyncIntervalSeconds = 200000 // disable txSync
+ err = cfg.SaveToDisk(relayNode.GetDataDir())
+ a.NoError(err)
+
+ fixture.Start()
+ defer fixture.LibGoalFixture.Shutdown()
+
+ client1 := fixture.GetLibGoalClientFromNodeController(primaryNode)
+ client2 := fixture.GetLibGoalClientFromNodeController(secondNode)
+ wallet1, err := client1.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ wallet2, err := client2.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addrs1, err := client1.ListAddresses(wallet1)
+ a.NoError(err)
+ addrs2, err := client2.ListAddresses(wallet2)
+ a.NoError(err)
+
+ // let the second node have insufficient stake for proposing a block
+ tx, err := client2.SendPaymentFromUnencryptedWallet(addrs2[0], addrs1[0], 1000, 4999999999000000, nil)
+ a.NoError(err)
+ status, err := client1.Status()
+ a.NoError(err)
+ _, err = fixture.WaitForConfirmedTxn(status.LastRound+100, addrs1[0], tx.ID().String())
+ a.NoError(err)
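+ // the catchpoint must cover at least the round observed when the transfer was submitted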
+ targetCatchpointRound := status.LastRound
+
+ // ensure the catchpoint is created for targetCatchpointRound
+ timer := time.NewTimer(100 * time.Second)
+outer:
+ for {
+ status, err = client1.Status()
+ a.NoError(err)
+
+ var round basics.Round
+ if status.LastCatchpoint != nil && len(*status.LastCatchpoint) > 0 {
+ round, _, err = ledgercore.ParseCatchpointLabel(*status.LastCatchpoint)
+ a.NoError(err)
+ if uint64(round) >= targetCatchpointRound {
+ break
+ }
+ }
+ select {
+ case <-timer.C:
+ a.Failf("timeout waiting a catchpoint", "target: %d, got %d", targetCatchpointRound, round)
+ break outer
+ default:
+ time.Sleep(250 * time.Millisecond)
+ }
+ }
+
+ //////////
+ // NOTE //
+ //////////
+ // THE *REAL* TEST STARTS HERE:
+ // We first ensure that, while the primary node is catching up, it is not ready.
+ // Then, once the primary node is at the target round, it should satisfy the ready (200) condition.
+
+ // let the primary node catchup
+ err = client1.Catchup(*status.LastCatchpoint)
+ a.NoError(err)
+
+ // The primary node is catching up with its previous catchpoint.
+ // Its status contains the catchpoint it is catching up against,
+ // so it should not be ready, and the readiness endpoint should return a 503 error.
+ a.Error(fixture.GetAlgodClientForController(primaryNode).ReadyCheck())
+
+ status1, err := client1.Status()
+ a.NoError(err)
+ targetRound := status1.LastRound + 5
+
+ // Wait for the network to start making progress again
+ primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode)
+ err = fixture.ClientWaitForRound(primaryNodeRestClient, targetRound,
+ 10*catchpointCatchupProtocol.AgreementFilterTimeout)
+ a.NoError(err)
+
+ // Once the primary node has reached the target round:
+ // - the sync-time (aka catchup time) should be 0.0
+ // - the catchpoint should be empty (len == 0)
+ timer = time.NewTimer(100 * time.Second)
+
+ for {
+ err = primaryNodeRestClient.ReadyCheck()
+
+ if err != nil {
+ select {
+ case <-timer.C:
+ a.Fail("timeout")
+ break
+ default:
+ time.Sleep(250 * time.Millisecond)
+ continue
+ }
+ }
+
+ status1, err = client1.Status()
+ a.NoError(err)
+ a.Equal(uint64(0), status1.CatchupTime)
+ a.Empty(status1.Catchpoint)
+ break
+ }
+}
+
// TestNodeTxSyncRestart starts a two-node and one relay network
// Waits until a catchpoint is created
// Lets the primary node have the majority of the stake
@@ -556,20 +709,10 @@ func TestNodeTxSyncRestart(t *testing.T) {
consensus := make(config.ConsensusProtocols)
protoVersion := protocol.ConsensusCurrentVersion
catchpointCatchupProtocol := config.Consensus[protoVersion]
- catchpointCatchupProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback
- // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md
- catchpointCatchupProtocol.SeedLookback = 2
- catchpointCatchupProtocol.SeedRefreshInterval = 2
- catchpointCatchupProtocol.MaxBalLookback = 2 * catchpointCatchupProtocol.SeedLookback * catchpointCatchupProtocol.SeedRefreshInterval
- catchpointCatchupProtocol.CatchpointLookback = catchpointCatchupProtocol.MaxBalLookback
- catchpointCatchupProtocol.EnableOnlineAccountCatchpoints = true
+ prevMaxTxnLife := catchpointCatchupProtocol.MaxTxnLife
+ applyCatchpointConsensusChanges(&catchpointCatchupProtocol)
+ catchpointCatchupProtocol.MaxTxnLife = prevMaxTxnLife
catchpointCatchupProtocol.StateProofInterval = 0
- if runtime.GOOS == "darwin" || runtime.GOARCH == "amd64" {
- // amd64/macos platforms are generally quite capable, so accelerate the round times to make the test run faster.
- catchpointCatchupProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second
- catchpointCatchupProtocol.AgreementFilterTimeout = 1 * time.Second
- }
consensus[protoVersion] = catchpointCatchupProtocol
var fixture fixtures.RestClientFixture
diff --git a/test/e2e-go/features/catchup/stateproofsCatchup_test.go b/test/e2e-go/features/catchup/stateproofsCatchup_test.go
new file mode 100644
index 000000000..a0d20f5f1
--- /dev/null
+++ b/test/e2e-go/features/catchup/stateproofsCatchup_test.go
@@ -0,0 +1,286 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package catchup
+
+import (
+ "context"
+ "database/sql"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/libgoal"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/framework/fixtures"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+func applyCatchpointStateProofConsensusChanges(consensusParams *config.ConsensusParams) {
+ // We decrease the StateProofStrengthTarget, creating a "weak cert", to allow state proofs to be generated when the
+ // signed weight and proven weight are very close to each other.
+ consensusParams.StateProofStrengthTarget = 4
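+ // A short interval and lookback keep state proof rounds within the small number of rounds these tests run.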
+ consensusParams.StateProofInterval = 8
+ consensusParams.StateProofVotersLookback = 2
+ consensusParams.EnableStateProofKeyregCheck = true
+ consensusParams.StateProofUseTrackerVerification = true
+}
+
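+// getStateProofNextRound fetches the block for the given round and returns the
+// StateProofNextRound recorded in its state proof tracking data.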
+func getStateProofNextRound(a *require.Assertions, goalClient *libgoal.Client, round basics.Round) basics.Round {
+ block, err := goalClient.BookkeepingBlock(uint64(round))
+ a.NoError(err)
+ return block.StateProofTracking[protocol.StateProofBasic].StateProofNextRound
+}
+
+func TestStateProofInReplayCatchpoint(t *testing.T) {
+ // Overview of this test:
+ // Configure consensus to generate a state proof in the target catchpoint's replay rounds,
+ // i.e., the node will have to "replay" the state proof transaction after fast catchup.
+ // Start a two-node network (the primary node has 100% of the stake, the using node has 0%).
+ // Create a web proxy, have the using node use it as a peer, and block all requests for round #2 while allowing everything else
+ // (this disables the node's ability to use regular catchup).
+ // Let it run until the first usable catchpoint, as computed in getFirstCatchpointRound, is generated.
+ // Instruct the using node to fast catchup.
+ // Wait until the using node is caught up to catchpointRound+1, skipping the "impossible" hole of round #2.
+ // Verify that the blocks replayed to the using node contained a state proof transaction.
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ consensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ applyCatchpointConsensusChanges(&consensusParams)
+ applyCatchpointStateProofConsensusChanges(&consensusParams)
+
+ fixture := getFixture(&consensusParams)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "CatchpointCatchupTestNetwork.json"))
+
+ primaryNode, primaryNodeRestClient, primaryErrorsCollector := startCatchpointGeneratingNode(a, fixture, "Primary")
+ defer primaryErrorsCollector.Print()
+ defer primaryNode.StopAlgod()
+
+ primaryNodeAddr, err := primaryNode.GetListeningAddress()
+ a.NoError(err)
+
+ usingNode, usingNodeRestClient, wp, usingNodeErrorsCollector := startCatchpointUsingNode(a, fixture, "Node", primaryNodeAddr)
+ defer usingNodeErrorsCollector.Print()
+ defer wp.Close()
+ defer usingNode.StopAlgod()
+
+ targetCatchpointRound := getFirstCatchpointRound(&consensusParams)
+
+ catchpointLabel, err := waitForCatchpointGeneration(fixture, primaryNodeRestClient, targetCatchpointRound)
+ a.NoError(err)
+
+ _, err = usingNodeRestClient.Catchup(catchpointLabel)
+ a.NoError(err)
+
+ // wait for fast catchup to start
+ attempt := 0
+ const sleepTime = 1 * time.Millisecond // a longer sleep could let catchup complete before we observe it start
+ const maxAttempts = 500
+ for {
+ status, err := usingNodeRestClient.Status()
+ a.NoError(err)
+ if status.Catchpoint != nil && len(*status.Catchpoint) > 0 {
+ t.Logf("Fast catchup from %d to %s is in progress", status.LastRound, *status.Catchpoint)
+ break
+ }
+ if attempt > maxAttempts {
+ a.FailNow("fast catchup failed to start", "waited %v", sleepTime*maxAttempts)
+ }
+ time.Sleep(sleepTime)
+ attempt++
+ }
+
+ // wait for fast catchup to complete and the node to be synced
+ err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound+1))
+ a.NoError(err)
+
+ primaryLibGoal := fixture.GetLibGoalClientFromNodeController(primaryNode)
+
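+ // The catchpoint ledger only covers rounds up to targetCatchpointRound - MaxBalLookback;
+ // every block after that had to be replayed on the using node.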
+ dbRoundAfterCatchpoint := targetCatchpointRound - basics.Round(consensusParams.MaxBalLookback)
+ a.True(getStateProofNextRound(a, &primaryLibGoal, dbRoundAfterCatchpoint) < getStateProofNextRound(a, &primaryLibGoal, targetCatchpointRound),
+ "No state proof transaction in replay, rounds were %d to %d", dbRoundAfterCatchpoint+1, targetCatchpointRound)
+}
+
+func TestStateProofAfterCatchpoint(t *testing.T) {
+ // Overview of this test:
+ // Configure consensus to generate a state proof transaction after the target catchpoint round, with voters from before
+ // the target state proof round.
+ // Start a two-node network (the primary node has 100% of the stake, the using node has 0%).
+ // Create a web proxy, have the using node use it as a peer, and block all requests for round #2 (allowing everything else).
+ // Let it run until the first usable catchpoint, as computed in getFirstCatchpointRound, is generated.
+ // Instruct the using node to catchpoint catchup from the proxy.
+ // Wait until the using node is caught up to catchpointRound+1, skipping the "impossible" hole of round #2 and
+ // participating in consensus.
+ // Wait until the next state proof has most likely been generated.
+ // Verify that the state proof's voters data came from the state proof tracker and that the state proof transaction
+ // itself happened after catchpoint catchup was completed.
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ consensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ applyCatchpointConsensusChanges(&consensusParams)
+ applyCatchpointStateProofConsensusChanges(&consensusParams)
+ consensusParams.StateProofInterval = 16
+ fixture := getFixture(&consensusParams)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "CatchpointCatchupTestNetwork.json"))
+
+ primaryNode, primaryNodeRestClient, primaryErrorsCollector := startCatchpointGeneratingNode(a, fixture, "Primary")
+ defer primaryErrorsCollector.Print()
+ defer primaryNode.StopAlgod()
+
+ primaryNodeAddr, err := primaryNode.GetListeningAddress()
+ a.NoError(err)
+
+ usingNode, usingNodeRestClient, wp, usingNodeErrorsCollector := startCatchpointUsingNode(a, fixture, "Node", primaryNodeAddr)
+ defer usingNodeErrorsCollector.Print()
+ defer wp.Close()
+ defer usingNode.StopAlgod()
+
+ targetCatchpointRound := getFirstCatchpointRound(&consensusParams)
+
+ catchpointLabel, err := waitForCatchpointGeneration(fixture, primaryNodeRestClient, targetCatchpointRound)
+ a.NoError(err)
+
+ _, err = usingNodeRestClient.Catchup(catchpointLabel)
+ a.NoError(err)
+
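+ // Wait until half an interval past the first state proof round that follows the catchpoint,
+ // by which point the corresponding state proof transaction should have been confirmed.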
+ roundAfterSPGeneration := targetCatchpointRound.RoundUpToMultipleOf(basics.Round(consensusParams.StateProofInterval)) +
+ basics.Round(consensusParams.StateProofInterval/2)
+ err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(roundAfterSPGeneration))
+ a.NoError(err)
+
+ primaryLibGoal := fixture.GetLibGoalClientFromNodeController(primaryNode)
+
+ dbRoundAfterCatchpoint := targetCatchpointRound - basics.Round(consensusParams.MaxBalLookback)
+ firstReplayRound := dbRoundAfterCatchpoint + 1
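+ // StateProofNextRound points one interval past the last attested round, whose voters
+ // are in turn sampled one interval earlier.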
+ currentCoveredLastAttestedRound := getStateProofNextRound(a, &primaryLibGoal, roundAfterSPGeneration).SubSaturate(basics.Round(consensusParams.StateProofInterval))
+ votersRound := currentCoveredLastAttestedRound.SubSaturate(basics.Round(consensusParams.StateProofInterval))
+
+ // We do this to make sure the verification data came from the tracker.
+ a.True(votersRound < firstReplayRound)
+ a.True(currentCoveredLastAttestedRound > targetCatchpointRound)
+}
+
+func TestSendSigsAfterCatchpointCatchup(t *testing.T) {
+ // Overview of this test:
+ // Start a three-node network (the primary node has 80% of the stake, the using node has 10%, and the normal node has 10%).
+ // Configure consensus to require the primary node and at least one other node to generate state proofs.
+ // Start the primary node and a normal node and wait for the network to reach round 3.
+ // Remove block number 2 from the primary node's database; this prevents Node2 from catching up normally and forces it to use fast catchup.
+ // Let it run until the first usable catchpoint, as computed in getFirstCatchpointRound, is generated.
+ // Run Node2.
+ // Wait until the using node is caught up to catchpointRound+1, skipping the "impossible" hole of round #2 and
+ // participating in consensus.
+ // Stop the normal node.
+ // Verify that a state proof transaction that the normal node could not have signed is accepted.
+ partitiontest.PartitionTest(t)
+ defer fixtures.ShutdownSynchronizedTest(t)
+
+ if testing.Short() {
+ t.Skip()
+ }
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ configurableConsensus := make(config.ConsensusProtocols)
+ consensusVersion := protocol.ConsensusVersion("catchpointtestingprotocol")
+ consensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ applyCatchpointStateProofConsensusChanges(&consensusParams)
+ applyCatchpointConsensusChanges(&consensusParams)
+ // Weight threshold allows creation of state proofs using the primary node and at least one other node.
+ consensusParams.StateProofWeightThreshold = (1 << 32) * 85 / 100
+ configurableConsensus[consensusVersion] = consensusParams
+
+ var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(configurableConsensus)
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "ThreeNodesWithRichAcct.json"))
+
+ primaryNode, primaryNodeRestClient, primaryEC := startCatchpointGeneratingNode(a, &fixture, "Primary")
+ defer primaryEC.Print()
+ defer primaryNode.StopAlgod()
+ primaryNodeAddr, err := primaryNode.GetListeningAddress()
+ a.NoError(err)
+
+ err = fixture.ClientWaitForRoundWithTimeout(primaryNodeRestClient, 3)
+ a.NoError(err)
+
+ normalNode, normalNodeRestClient, normalNodeEC := startCatchpointNormalNode(a, &fixture, "Node1", primaryNodeAddr)
+ defer normalNodeEC.Print()
+ defer normalNode.StopAlgod()
+
+ err = fixture.ClientWaitForRoundWithTimeout(normalNodeRestClient, 3)
+ a.NoError(err)
+
+ // At this point the primary node and Node1 have passed round 3. Before running Node2, we remove block 2 from the primary node's database.
+ // This will force Node2 to use fast catchup.
+ primNodeGenDir, err := primaryNode.GetGenesisDir()
+ a.NoError(err)
+ acc, err := db.MakeAccessor(filepath.Join(primNodeGenDir, "ledger.block.sqlite"), false, false)
+ a.NoError(err)
+ err = acc.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ _, err := tx.Exec("DELETE FROM blocks WHERE rnd = 2")
+ return err
+ })
+ a.NoError(err)
+ acc.Close()
+
+ usingNode, usingNodeRestClient, usingNodeEC := startCatchpointNormalNode(a, &fixture, "Node2", primaryNodeAddr)
+ defer usingNodeEC.Print()
+ defer usingNode.StopAlgod()
+
+ targetCatchpointRound := getFirstCatchpointRound(&consensusParams)
+
+ catchpointLabel, err := waitForCatchpointGeneration(&fixture, primaryNodeRestClient, targetCatchpointRound)
+ a.NoError(err)
+ _, err = usingNodeRestClient.Catchup(catchpointLabel)
+ a.NoError(err)
+
+ err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound)+1)
+ a.NoError(err)
+
+ lastNormalRound, err := fixture.GetLibGoalClientFromNodeController(normalNode).CurrentRound()
+ a.NoError(err)
+ normalNode.StopAlgod()
+
+ // We wait until we know for sure that we're in a round that contains a state proof signed
+ // by the usingNode. We give the test 2*basics.Round(consensusParams.StateProofInterval) worth of time
+ // to prevent it from being flaky, since receiving signatures from the newly caught-up node might take a while.
+ lastNormalNodeSignedRound := basics.Round(lastNormalRound).RoundDownToMultipleOf(basics.Round(consensusParams.StateProofInterval))
+ lastNormalNextStateProofRound := lastNormalNodeSignedRound + basics.Round(consensusParams.StateProofInterval)
+ targetRound := lastNormalNextStateProofRound + basics.Round(consensusParams.StateProofInterval*2)
+ err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetRound))
+ a.NoError(err)
+
+ primaryClient := fixture.GetLibGoalClientFromNodeController(primaryNode)
+ spNextRound := getStateProofNextRound(a, &primaryClient, targetRound)
+ a.True(spNextRound > lastNormalNextStateProofRound)
+}
diff --git a/test/e2e-go/features/devmode/devmode_test.go b/test/e2e-go/features/devmode/devmode_test.go
index ec1fa692b..728155ad3 100644
--- a/test/e2e-go/features/devmode/devmode_test.go
+++ b/test/e2e-go/features/devmode/devmode_test.go
@@ -32,7 +32,6 @@ import (
func TestDevMode(t *testing.T) {
partitiontest.PartitionTest(t)
- t.Skipf("Skipping flaky test. Re-enable with #3267")
if testing.Short() {
t.Skip()
@@ -50,18 +49,29 @@ func TestDevMode(t *testing.T) {
txn := fixture.SendMoneyAndWait(0, 100000, 1000, sender.Address, receiver.String(), "")
require.NotNil(t, txn.ConfirmedRound)
firstRound := *txn.ConfirmedRound + 1
- start := time.Now()
+ blk, err := fixture.AlgodClient.Block(*txn.ConfirmedRound)
+ require.NoError(t, err)
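+ // the raw block map exposes the block's Unix timestamp under the "ts" key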
+ seconds := int64(blk.Block["ts"].(float64))
+ prevTime := time.Unix(seconds, 0)
+ // Set Block timestamp offset to test that consecutive txns properly get their block time set
+ const blkOffset = uint64(1_000_000)
+ err = fixture.AlgodClient.SetBlockTimestampOffset(blkOffset)
+ require.NoError(t, err)
+ resp, err := fixture.AlgodClient.GetBlockTimestampOffset()
+ require.NoError(t, err)
+ require.Equal(t, blkOffset, resp.Offset)
// 2 transactions should be sent within one normal confirmation time.
for i := uint64(0); i < 2; i++ {
- txn = fixture.SendMoneyAndWait(firstRound+i, 100000, 1000, sender.Address, receiver.String(), "")
- require.Equal(t, firstRound+i, txn.Txn.Txn.FirstValid)
+ round := firstRound + i
+ txn = fixture.SendMoneyAndWait(round, 100001, 1000, sender.Address, receiver.String(), "")
+ // SendMoneyAndWait subtracts 1 from firstValid
+ require.Equal(t, round-1, uint64(txn.Txn.Txn.FirstValid))
+ newBlk, err := fixture.AlgodClient.Block(round)
+ require.NoError(t, err)
+ newBlkSeconds := int64(newBlk.Block["ts"].(float64))
+ currTime := time.Unix(newBlkSeconds, 0)
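+ // each devmode block's timestamp should advance by exactly blkOffset (1,000,000) seconds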
+ require.Equal(t, currTime, prevTime.Add(1_000_000*time.Second))
+ prevTime = currTime
}
- require.True(t, time.Since(start) < 8*time.Second, "Transactions should be quickly confirmed faster than usual.")
-
- // Without transactions there should be no rounds even after a normal confirmation time.
- time.Sleep(10 * time.Second)
- status, err := fixture.LibGoalClient.Status()
- require.NoError(t, err)
- require.Equal(t, txn.ConfirmedRound, status.LastRound, "There should be no rounds without a transaction.")
}
diff --git a/test/e2e-go/features/followerNode/syncDeltas_test.go b/test/e2e-go/features/followerNode/syncDeltas_test.go
index 83c131512..2a8d2b961 100644
--- a/test/e2e-go/features/followerNode/syncDeltas_test.go
+++ b/test/e2e-go/features/followerNode/syncDeltas_test.go
@@ -43,7 +43,7 @@ func TestBasicSyncMode(t *testing.T) {
var fixture fixtures.RestClientFixture
// Give the second node (which starts up last) all the stake so that its proposal always has better credentials,
- // and so that its proposal isn't dropped. Otherwise the test burns 17s to recover. We don't care about stake
+ // and so that its proposal isn't dropped. Otherwise, the test burns 17s to recover. We don't care about stake
// distribution so this is fine.
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodesFollower100Second.json"))
defer fixture.Shutdown()
@@ -53,7 +53,6 @@ func TestBasicSyncMode(t *testing.T) {
a.NoError(err)
// Let the network make some progress
- a.NoError(err)
waitForRound := uint64(5)
err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound)
a.NoError(err)
@@ -68,6 +67,7 @@ func TestBasicSyncMode(t *testing.T) {
rResp, err := followClient.GetSyncRound()
a.NoError(err)
a.Equal(round, rResp.Round)
+ // make some progress to reach the sync round
err = fixture.ClientWaitForRoundWithTimeout(followClient, round)
a.NoError(err)
// retrieve state delta
diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
index ed593b765..767ee86b3 100644
--- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
+++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
@@ -124,8 +124,8 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) {
shortPartKeysProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
shortPartKeysProtocol.SeedLookback = 2
shortPartKeysProtocol.SeedRefreshInterval = 8
- if runtime.GOARCH == "amd64" {
- // amd64 platforms are generally quite capable, so accelerate the round times to make the test run faster.
+ if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" {
+ // amd64 and arm64 platforms are generally quite capable, so accelerate the round times to make the test run faster.
shortPartKeysProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second
shortPartKeysProtocol.AgreementFilterTimeout = 1 * time.Second
}
diff --git a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
index 8e4df5fb3..7565a821a 100644
--- a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
+++ b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go
@@ -65,8 +65,8 @@ func TestOverlappingParticipationKeys(t *testing.T) {
// new keys must exist at least 4 rounds prior use
shortPartKeysProtocol.SeedLookback = 2
shortPartKeysProtocol.SeedRefreshInterval = 1
- if runtime.GOARCH == "amd64" {
- // amd64 platforms are generally quite capable, so accelerate the round times to make the test run faster.
+ if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" {
+ // amd64 and arm64 platforms are generally quite capable, so accelerate the round times to make the test run faster.
shortPartKeysProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second
shortPartKeysProtocol.AgreementFilterTimeout = 1 * time.Second
}
diff --git a/test/e2e-go/features/stateproofs/stateproofs_test.go b/test/e2e-go/features/stateproofs/stateproofs_test.go
index e293006f4..4d4bc9f51 100644
--- a/test/e2e-go/features/stateproofs/stateproofs_test.go
+++ b/test/e2e-go/features/stateproofs/stateproofs_test.go
@@ -21,7 +21,6 @@ import (
"fmt"
"os"
"path/filepath"
- "runtime"
"strings"
"sync"
"sync/atomic"
@@ -113,7 +112,7 @@ func TestStateProofs(t *testing.T) {
var fixture fixtures.RestClientFixture
fixture.SetConsensus(configurableConsensus)
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
+ if testing.Short() {
fixture.Setup(t, filepath.Join("nettemplates", "StateProofSmall.json"))
} else {
fixture.Setup(t, filepath.Join("nettemplates", "StateProof.json"))
@@ -231,12 +230,8 @@ func TestStateProofOverlappingKeys(t *testing.T) {
var fixture fixtures.RestClientFixture
pNodes := 5
fixture.SetConsensus(configurableConsensus)
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
- fixture.Setup(t, filepath.Join("nettemplates", "StateProofSmall.json"))
- pNodes = 2
- } else {
- fixture.Setup(t, filepath.Join("nettemplates", "StateProof.json"))
- }
+ fixture.Setup(t, filepath.Join("nettemplates", "StateProof.json"))
+
defer fixture.Shutdown()
// Get node libgoal clients in order to update their participation keys
@@ -260,16 +255,14 @@ func TestStateProofOverlappingKeys(t *testing.T) {
var lastStateProofMessage stateproofmsg.Message
libgoalClient := fixture.LibGoalClient
- k, err := libgoalNodeClients[0].GetParticipationKeys()
- r.NoError(err)
- voteLastValid := k[0].Key.VoteLastValid
- expectedNumberOfStateProofs := uint64(10)
+ expectedNumberOfStateProofs := uint64(8)
for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
- if rnd == voteLastValid-64 { // allow some buffer period before the voting keys are expired (for the keyreg to take effect)
+ if rnd == consensusParams.StateProofInterval*5 { // allow some buffer period before the voting keys are expired (for the keyreg to take effect)
+ fmt.Println("installing participation keys at round", rnd)
// Generate participation keys (for the same accounts)
for i := 0; i < pNodes; i++ {
// Overlapping stateproof keys (the key for round 0 is valid up to 256)
- _, part, err := installParticipationKey(t, libgoalNodeClients[i], accounts[i], 0, 200)
+ _, part, err := installParticipationKey(t, libgoalNodeClients[i], accounts[i], 0, 400)
r.NoError(err)
participations[i] = part
}
@@ -286,7 +279,7 @@ func TestStateProofOverlappingKeys(t *testing.T) {
amount: 1,
}.sendPayment(r, &fixture, rnd)
- err = fixture.WaitForRound(rnd, timeoutUntilNextRound)
+ err := fixture.WaitForRound(rnd, timeoutUntilNextRound)
r.NoError(err)
blk, err := libgoalClient.BookkeepingBlock(rnd)
@@ -331,7 +324,7 @@ func TestStateProofMessageCommitmentVerification(t *testing.T) {
var fixture fixtures.RestClientFixture
fixture.SetConsensus(configurableConsensus)
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
+ if testing.Short() {
fixture.Setup(t, filepath.Join("nettemplates", "StateProofSmall.json"))
} else {
fixture.Setup(t, filepath.Join("nettemplates", "StateProof.json"))
@@ -380,8 +373,8 @@ func TestStateProofMessageCommitmentVerification(t *testing.T) {
}
func getDefaultStateProofConsensusParams() config.ConsensusParams {
- consensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
- consensusParams.StateProofInterval = 16
+ consensusParams := config.Consensus[protocol.ConsensusFuture]
+
consensusParams.StateProofTopVoters = 1024
consensusParams.StateProofVotersLookback = 2
consensusParams.StateProofWeightThreshold = (1 << 32) * 30 / 100
@@ -391,6 +384,12 @@ func getDefaultStateProofConsensusParams() config.ConsensusParams {
consensusParams.AgreementFilterTimeout = 1500 * time.Millisecond
consensusParams.AgreementFilterTimeoutPeriod0 = 1500 * time.Millisecond
+ if testing.Short() {
+ consensusParams.StateProofInterval = 16
+ } else {
+ consensusParams.StateProofInterval = 32
+ }
+
return consensusParams
}
@@ -443,15 +442,15 @@ func verifyStateProofForRound(r *require.Assertions, fixture *fixtures.RestClien
return stateProofMessage, nextStateProofBlock
}
-// TestRecoverFromLaggingStateProofChain simulates a situation where the stateproof chain is lagging after the main chain.
+// TestStateProofRecoveryDuringRecoveryInterval simulates a situation where the stateproof chain is lagging after the main chain.
// If the missing data is being accepted before StateProofMaxRecoveryIntervals * StateProofInterval rounds have passed, nodes should
// be able to produce stateproofs and continue as normal
-func TestRecoverFromLaggingStateProofChain(t *testing.T) {
+func TestStateProofRecoveryDuringRecoveryPeriod(t *testing.T) {
partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
- t.Skip("This test is difficult for ARM")
+ if testing.Short() {
+ t.Skip()
}
r := require.New(fixtures.SynchronizedTest(t))
@@ -542,14 +541,13 @@ func TestRecoverFromLaggingStateProofChain(t *testing.T) {
r.Equalf(int(consensusParams.StateProofInterval*expectedNumberOfStateProofs), int(lastStateProofBlock.Round()), "the expected last state proof block wasn't the one that was observed")
}
-// TestUnableToRecoverFromLaggingStateProofChain simulates a situation where the stateproof chain is lagging after the main chain.
-// unlike TestRecoverFromLaggingStateProofChain, in this test the node will start at a later round and the network will not be able to produce stateproofs/
-func TestUnableToRecoverFromLaggingStateProofChain(t *testing.T) {
+// TestStateProofRecovery test that the state proof chain can be recovered even after the StateProofMaxRecoveryIntervals has passed.
+func TestStateProofRecovery(t *testing.T) {
partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
- t.Skip("This test is difficult for ARM")
+ if testing.Short() {
+ t.Skip()
}
r := require.New(fixtures.SynchronizedTest(t))
@@ -566,7 +564,12 @@ func TestUnableToRecoverFromLaggingStateProofChain(t *testing.T) {
// for that reason we need to the decrease the StateProofStrengthTarget creating a "weak cert"
consensusParams.StateProofWeightThreshold = (1 << 32) * 90 / 100
consensusParams.StateProofStrengthTarget = 4
- consensusParams.StateProofMaxRecoveryIntervals = 4
+ consensusParams.StateProofMaxRecoveryIntervals = 2
+ consensusParams.StateProofUseTrackerVerification = true
+ consensusParams.SeedLookback = 2
+ consensusParams.SeedRefreshInterval = 2
+ consensusParams.MaxBalLookback = 2 * consensusParams.SeedLookback * consensusParams.SeedRefreshInterval // 8
+ consensusParams.MaxTxnLife = 13
configurableConsensus[consensusVersion] = consensusParams
var fixture fixtures.RestClientFixture
@@ -585,10 +588,13 @@ func TestUnableToRecoverFromLaggingStateProofChain(t *testing.T) {
var lastStateProofBlock bookkeeping.Block
libgoal := fixture.LibGoalClient
- expectedNumberOfStateProofs := uint64(4)
- // Loop through the rounds enough to check for expectedNumberOfStateProofs state proofs
- for rnd := uint64(2); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
- if rnd == (consensusParams.StateProofMaxRecoveryIntervals+2)*consensusParams.StateProofInterval {
+ var lastStateProofMessage stateproofmsg.Message
+
+ expectedNumberOfStateProofs := uint64(7)
+ numberOfGraceIntervals := uint64(3)
+ rnd := uint64(2)
+ for ; rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs); rnd++ {
+ if rnd == (consensusParams.StateProofMaxRecoveryIntervals+4)*consensusParams.StateProofInterval {
t.Logf("at round %d starting node\n", rnd)
dir, err = fixture.GetNodeDir("Node4")
r.NoError(err)
@@ -622,9 +628,46 @@ func TestUnableToRecoverFromLaggingStateProofChain(t *testing.T) {
if lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound &&
lastStateProofBlock.Round() != 0 {
- r.FailNow("found a state proof at round %d", blk.Round())
+ nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval
+
+ t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
+ // Find the state proof transaction
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(r, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams)
+ lastStateProofMessage = stateProofMessage
+ lastStateProofBlock = nextStateProofBlock
}
}
+
+ // at this point we expect the state proof chain to be completely caught up. However, In order to avoid flakiness on
+ // heavily loaded machines, we would wait some extra round for the state proofs to catch up
+ for ; rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+numberOfGraceIntervals); rnd++ {
+
+ err = fixture.WaitForRound(rnd, timeoutUntilNextRound)
+ r.NoError(err)
+
+ blk, err := libgoal.BookkeepingBlock(rnd)
+ r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
+
+ if lastStateProofBlock.Round() == 0 {
+ lastStateProofBlock = blk
+ }
+
+ if lastStateProofBlock.Round()+basics.Round(consensusParams.StateProofInterval) < blk.StateProofTracking[protocol.StateProofBasic].StateProofNextRound &&
+ lastStateProofBlock.Round() != 0 {
+ nextStateProofRound := uint64(lastStateProofBlock.Round()) + consensusParams.StateProofInterval
+
+ t.Logf("found a state proof for round %d at round %d", nextStateProofRound, blk.Round())
+ // Find the state proof transaction
+ stateProofMessage, nextStateProofBlock := verifyStateProofForRound(r, &fixture, nextStateProofRound, lastStateProofMessage, lastStateProofBlock, consensusParams)
+ lastStateProofMessage = stateProofMessage
+ lastStateProofBlock = nextStateProofBlock
+ }
+
+ if int(consensusParams.StateProofInterval*expectedNumberOfStateProofs) <= int(lastStateProofBlock.Round()) {
+ return
+ }
+ }
+ r.Equalf(int(consensusParams.StateProofInterval*expectedNumberOfStateProofs), int(lastStateProofBlock.Round()), "the expected last state proof block wasn't the one that was observed")
}
// installParticipationKey generates a new key for a given account and installs it with the client.
@@ -702,7 +745,7 @@ func TestAttestorsChange(t *testing.T) {
from: accountFetcher{nodeName: "richNode", accountNumber: 0},
to: accountFetcher{nodeName: "poorNode", accountNumber: 0},
}
- sum := uint64(0)
+
for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
// Changing the amount to pay. This should transfer most of the money from the rich node to the poor node.
if consensusParams.StateProofInterval*2 == rnd {
@@ -720,31 +763,15 @@ func TestAttestorsChange(t *testing.T) {
}
a.NoError(fixture.WaitForRound(rnd, timeoutUntilNextRound))
+
blk, err := libgoal.BookkeepingBlock(rnd)
a.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
- // We sample the accounts' balances StateProofVotersLookback rounds before state proof round.
- if (rnd+consensusParams.StateProofVotersLookback)%consensusParams.StateProofInterval == 0 {
- sum = 0
- // the main part of the test (computing the total stake of the nodes):
- for i := 1; i <= 3; i++ {
- sum += accountFetcher{fmt.Sprintf("Node%d", i), 0}.getBalance(a, &fixture)
- }
-
- richNodeStake := accountFetcher{"richNode", 0}.getBalance(a, &fixture)
- poorNodeStake := accountFetcher{"poorNode", 0}.getBalance(a, &fixture)
- sum = sum + richNodeStake + poorNodeStake
- }
-
if (rnd % consensusParams.StateProofInterval) == 0 {
// Must have a merkle commitment for participants
a.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0)
a.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{})
- stake := blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.ToUint64()
-
- a.Equal(sum, stake)
-
// Special case: bootstrap validation with the first block
// that has a merkle root.
if lastStateProofBlock.Round() == 0 {
@@ -779,7 +806,6 @@ func TestTotalWeightChanges(t *testing.T) {
consensusParams.StateProofWeightThreshold = (1 << 32) * 90 / 100
consensusParams.StateProofStrengthTarget = 4
consensusParams.StateProofTopVoters = 4
- //consensusParams.StateProofInterval = 32
configurableConsensus := config.ConsensusProtocols{
protocol.ConsensusVersion("test-fast-stateproofs"): consensusParams,
@@ -787,7 +813,7 @@ func TestTotalWeightChanges(t *testing.T) {
var fixture fixtures.RestClientFixture
fixture.SetConsensus(configurableConsensus)
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
+ if testing.Short() {
fixture.Setup(t, filepath.Join("nettemplates", "RichAccountStateProofSmall.json"))
} else {
fixture.Setup(t, filepath.Join("nettemplates", "RichAccountStateProof.json"))
@@ -805,7 +831,7 @@ func TestTotalWeightChanges(t *testing.T) {
for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
// Rich node goes offline
- if consensusParams.StateProofInterval*2-8 == rnd {
+ if consensusParams.StateProofInterval*2-(consensusParams.StateProofInterval/2) == rnd {
// subtract 8 rounds since the total online stake is calculated prior to the actual state proof round (lookback)
richNode.goOffline(a, &fixture, rnd)
}
@@ -1189,8 +1215,8 @@ func TestStateProofCheckTotalStake(t *testing.T) {
var lastStateProofBlock bookkeeping.Block
libgoalClient := fixture.LibGoalClient
- var totalSupplyAtRound [100]model.SupplyResponse
- var accountSnapshotAtRound [100][]model.Account
+ var totalSupplyAtRound [1000]model.SupplyResponse
+ var accountSnapshotAtRound [1000][]model.Account
for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
if rnd == consensusParams.StateProofInterval+consensusParams.StateProofVotersLookback { // here we register the keys of address 0 so it won't be able the sign a state proof (its stake would be removed for the total)
diff --git a/test/e2e-go/features/teal/compile_test.go b/test/e2e-go/features/teal/compile_test.go
index 760551f28..da1538112 100644
--- a/test/e2e-go/features/teal/compile_test.go
+++ b/test/e2e-go/features/teal/compile_test.go
@@ -24,6 +24,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/test/framework/fixtures"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -50,7 +51,7 @@ func TestTealCompile(t *testing.T) {
// get lib goal client
libGoalClient := fixture.LibGoalFixture.GetLibGoalClientFromNodeController(primaryNode)
- compiledProgram, _, err := libGoalClient.Compile([]byte(""))
+ compiledProgram, _, _, err := libGoalClient.Compile([]byte(""), false)
a.Nil(compiledProgram)
a.Equal(err.Error(), "HTTP 404 Not Found: /teal/compile was not enabled in the configuration file by setting the EnableDeveloperAPI to true")
@@ -65,19 +66,22 @@ func TestTealCompile(t *testing.T) {
fixture.Start()
var hash crypto.Digest
- compiledProgram, hash, err = libGoalClient.Compile([]byte("int 1"))
+ var srcMap *logic.SourceMap
+ compiledProgram, hash, srcMap, err = libGoalClient.Compile([]byte("int 1"), true)
a.NotNil(compiledProgram)
a.NoError(err, "A valid v1 program should result in a compilation success")
+ a.NotNil(srcMap)
a.Equal([]byte{0x1, 0x20, 0x1, 0x1, 0x22}, compiledProgram)
a.Equal("6Z3C3LDVWGMX23BMSYMANACQOSINPFIRF77H7N3AWJZYV6OH6GWQ", hash.String())
- compiledProgram, hash, err = libGoalClient.Compile([]byte("#pragma version 2\nint 1"))
+ compiledProgram, hash, srcMap, err = libGoalClient.Compile([]byte("#pragma version 2\nint 1"), true)
a.NotNil(compiledProgram)
a.NoError(err, "A valid v2 program should result in a compilation success")
+ a.NotNil(srcMap)
a.Equal([]byte{0x2, 0x20, 0x1, 0x1, 0x22}, compiledProgram)
a.Equal("YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A", hash.String())
- compiledProgram, hash, err = libGoalClient.Compile([]byte("bad program"))
+ compiledProgram, hash, _, err = libGoalClient.Compile([]byte("bad program"), false)
a.Error(err, "An invalid program should result in a compilation failure")
a.Nil(compiledProgram)
a.Equal(crypto.Digest{}, hash)
diff --git a/test/e2e-go/features/transactions/asset_test.go b/test/e2e-go/features/transactions/asset_test.go
index 3c6987b42..7715f01f4 100644
--- a/test/e2e-go/features/transactions/asset_test.go
+++ b/test/e2e-go/features/transactions/asset_test.go
@@ -549,7 +549,7 @@ func TestAssetGroupCreateSendDestroy(t *testing.T) {
a.NoError(err)
account1 := accountList[0]
- txCount := uint64(0)
+ txCount := uint64(1000) // starting with consensus v38, the transaction counter is initialized to 1000
fee := uint64(1000000)
manager := account0
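The new initial value reflects a ledger change: starting with consensus v38, a fresh network's transaction counter begins at 1000 instead of 0, so the first creatable (asset or app) gets ID 1001. A quick illustration, under the labeled assumption that creatable IDs come from the post-increment counter:

package main

import "fmt"

func main() {
	txCount := uint64(1000)  // consensus v38 initial counter (assumption: IDs derive from it)
	fmt.Println(txCount + 1) // first asset/app ID on a fresh network: 1001
}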
diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go
index 146d1585c..2ff0c08e5 100644
--- a/test/e2e-go/restAPI/restClient_test.go
+++ b/test/e2e-go/restAPI/restClient_test.go
@@ -1522,15 +1522,23 @@ end:
{encodeInt(12321), "int:12321", []byte{0, 1, 254, 3, 2}},
{[]byte{0, 248, 255, 32}, "b64:APj/IA==", []byte("lux56")},
}
+
for _, boxTest := range boxTests {
// Box values are 5 bytes, as defined by the test TEAL program.
operateBoxAndSendTxn("create", []string{string(boxTest.name)}, []string{""})
operateBoxAndSendTxn("set", []string{string(boxTest.name)}, []string{string(boxTest.value)})
+ currentRoundBeforeBoxes, err := testClient.CurrentRound()
+ a.NoError(err)
boxResponse, err := testClient.GetApplicationBoxByName(uint64(createdAppID), boxTest.encodedName)
a.NoError(err)
+ currentRoundAfterBoxes, err := testClient.CurrentRound()
+ a.NoError(err)
a.Equal(boxTest.name, boxResponse.Name)
a.Equal(boxTest.value, boxResponse.Value)
+	// To reduce flakiness, only check that the round in the box response falls within a range.
+ a.GreaterOrEqual(boxResponse.Round, currentRoundBeforeBoxes)
+ a.LessOrEqual(boxResponse.Round, currentRoundAfterBoxes)
}
const numberOfBoxesRemaining = uint64(3)
@@ -1557,3 +1565,426 @@ end:
assertBoxCount(numberOfBoxesRemaining)
}
+
+func TestSimulateTransaction(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ var localFixture fixtures.RestClientFixture
+ localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ defer localFixture.Shutdown()
+
+ testClient := localFixture.LibGoalClient
+
+ _, err := testClient.WaitForRound(1)
+ a.NoError(err)
+
+ wh, err := testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addresses, err := testClient.ListAddresses(wh)
+ a.NoError(err)
+ senderBalance, senderAddress := getMaxBalAddr(t, testClient, addresses)
+ if senderAddress == "" {
+ t.Error("no addr with funds")
+ }
+ a.NoError(err)
+
+ toAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
+ closeToAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
+
+ // Ensure these accounts don't exist
+ receiverBalance, err := testClient.GetBalance(toAddress)
+ a.NoError(err)
+ a.Zero(receiverBalance)
+ closeToBalance, err := testClient.GetBalance(closeToAddress)
+ a.NoError(err)
+ a.Zero(closeToBalance)
+
+ txn, err := testClient.ConstructPayment(senderAddress, toAddress, 0, senderBalance/2, nil, closeToAddress, [32]byte{}, 0, 0)
+ a.NoError(err)
+ stxn, err := testClient.SignTransactionWithWallet(wh, nil, txn)
+ a.NoError(err)
+
+ currentRoundBeforeSimulate, err := testClient.CurrentRound()
+ a.NoError(err)
+
+ simulateRequest := v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {
+ Txns: []transactions.SignedTxn{stxn},
+ },
+ },
+ }
+ result, err := testClient.SimulateTransactions(simulateRequest)
+ a.NoError(err)
+
+	currentRoundAfterSimulate, err := testClient.CurrentRound()
+	a.NoError(err)
+
+	// To reduce flakiness, only check that the round from simulate falls within a range.
+	a.GreaterOrEqual(result.LastRound, currentRoundBeforeSimulate)
+	a.LessOrEqual(result.LastRound, currentRoundAfterSimulate)
+
+ closingAmount := senderBalance - txn.Fee.Raw - txn.Amount.Raw
+ expectedResult := v2.PreEncodedSimulateResponse{
+ Version: 2,
+ LastRound: result.LastRound, // checked above
+ TxnGroups: []v2.PreEncodedSimulateTxnGroupResult{
+ {
+ Txns: []v2.PreEncodedSimulateTxnResult{
+ {
+ Txn: v2.PreEncodedTxInfo{
+ Txn: stxn,
+ ClosingAmount: &closingAmount,
+ },
+ },
+ },
+ },
+ },
+ }
+ a.Equal(expectedResult, result)
+
+ // Ensure the transaction did not actually get applied to the ledger
+ receiverBalance, err = testClient.GetBalance(toAddress)
+ a.NoError(err)
+ a.Zero(receiverBalance)
+ closeToBalance, err = testClient.GetBalance(closeToAddress)
+ a.NoError(err)
+ a.Zero(closeToBalance)
+}
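The expectedResult above hard-codes the close-out arithmetic: a payment with CloseRemainderTo sends Amount to the receiver and whatever remains after the fee to the close-to account. A one-liner capturing that bookkeeping (pending rewards are zero in this fixture, so they are ignored):

package sketch

// expectedClosingAmount mirrors the test's closingAmount computation for a
// payment with CloseRemainderTo set: balance minus fee minus amount.
func expectedClosingAmount(senderBalance, fee, amount uint64) uint64 {
	return senderBalance - fee - amount
}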
+
+func TestSimulateWithOptionalSignatures(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ var localFixture fixtures.RestClientFixture
+ localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ defer localFixture.Shutdown()
+
+ testClient := localFixture.LibGoalClient
+
+ _, err := testClient.WaitForRound(1)
+ a.NoError(err)
+
+ wh, err := testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addresses, err := testClient.ListAddresses(wh)
+ a.NoError(err)
+ _, senderAddress := getMaxBalAddr(t, testClient, addresses)
+ if senderAddress == "" {
+ t.Error("no addr with funds")
+ }
+ a.NoError(err)
+
+ txn, err := testClient.ConstructPayment(senderAddress, senderAddress, 0, 1, nil, "", [32]byte{}, 0, 0)
+ a.NoError(err)
+
+ simulateRequest := v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {
+ Txns: []transactions.SignedTxn{{Txn: txn}}, // no signature
+ },
+ },
+ AllowEmptySignatures: true,
+ }
+ result, err := testClient.SimulateTransactions(simulateRequest)
+ a.NoError(err)
+
+ allowEmptySignatures := true
+ expectedResult := v2.PreEncodedSimulateResponse{
+ Version: 2,
+ LastRound: result.LastRound,
+ TxnGroups: []v2.PreEncodedSimulateTxnGroupResult{
+ {
+ Txns: []v2.PreEncodedSimulateTxnResult{
+ {
+ Txn: v2.PreEncodedTxInfo{
+ Txn: transactions.SignedTxn{Txn: txn},
+ },
+ },
+ },
+ },
+ },
+ EvalOverrides: &model.SimulationEvalOverrides{
+ AllowEmptySignatures: &allowEmptySignatures,
+ },
+ }
+ a.Equal(expectedResult, result)
+}
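A detail this test relies on: the response's EvalOverrides block is present only when the request actually asked for an override, and it echoes back what was applied (the shell tests below assert the absent case). A small sketch of that contract, using the same types as the test:

package sketch

import (
	"fmt"

	v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
)

// reportOverrides illustrates the contract the assertions rely on:
// EvalOverrides is nil unless an override such as AllowEmptySignatures
// was requested, in which case the applied value is echoed back.
func reportOverrides(resp v2.PreEncodedSimulateResponse) {
	if resp.EvalOverrides == nil {
		fmt.Println("no overrides in effect")
		return
	}
	if resp.EvalOverrides.AllowEmptySignatures != nil {
		fmt.Println("empty signatures allowed:", *resp.EvalOverrides.AllowEmptySignatures)
	}
}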
+
+func TestSimulateWithUnlimitedLog(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ var localFixture fixtures.RestClientFixture
+ localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ defer localFixture.Shutdown()
+
+ testClient := localFixture.LibGoalClient
+
+ _, err := testClient.WaitForRound(1)
+ a.NoError(err)
+
+ wh, err := testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addresses, err := testClient.ListAddresses(wh)
+ a.NoError(err)
+ _, senderAddress := getMaxBalAddr(t, testClient, addresses)
+ if senderAddress == "" {
+ t.Error("no addr with funds")
+ }
+ a.NoError(err)
+
+ toAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
+ closeToAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
+
+ // Ensure these accounts don't exist
+ receiverBalance, err := testClient.GetBalance(toAddress)
+ a.NoError(err)
+ a.Zero(receiverBalance)
+ closeToBalance, err := testClient.GetBalance(closeToAddress)
+ a.NoError(err)
+ a.Zero(closeToBalance)
+
+	// construct a program that issues many log calls
+ prog := `#pragma version 8
+txn NumAppArgs
+int 0
+==
+bnz final
+`
+ for i := 0; i < 17; i++ {
+ prog += `byte "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+log
+`
+ }
+ prog += `final:
+int 1`
+ ops, err := logic.AssembleString(prog)
+ a.NoError(err)
+ approval := ops.Program
+ ops, err = logic.AssembleString("#pragma version 8\nint 1")
+ a.NoError(err)
+ clearState := ops.Program
+
+ gl := basics.StateSchema{}
+ lc := basics.StateSchema{}
+
+ // create app
+ appCreateTxn, err := testClient.MakeUnsignedApplicationCallTx(
+ 0, nil, nil, nil,
+ nil, nil, transactions.NoOpOC,
+ approval, clearState, gl, lc, 0,
+ )
+ a.NoError(err)
+ appCreateTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, 0, appCreateTxn)
+ a.NoError(err)
+ // sign and broadcast
+ appCreateTxID, err := testClient.SignAndBroadcastTransaction(wh, nil, appCreateTxn)
+ a.NoError(err)
+ _, err = waitForTransaction(t, testClient, senderAddress, appCreateTxID, 30*time.Second)
+ a.NoError(err)
+
+ // get app ID
+ submittedAppCreateTxn, err := testClient.PendingTransactionInformation(appCreateTxID)
+ a.NoError(err)
+ a.NotNil(submittedAppCreateTxn.ApplicationIndex)
+ createdAppID := basics.AppIndex(*submittedAppCreateTxn.ApplicationIndex)
+ a.Greater(uint64(createdAppID), uint64(0))
+
+ // fund app account
+ appFundTxn, err := testClient.SendPaymentFromWallet(
+ wh, nil, senderAddress, createdAppID.Address().String(),
+ 0, 10_000_000, nil, "", 0, 0,
+ )
+ a.NoError(err)
+ appFundTxID := appFundTxn.ID()
+ _, err = waitForTransaction(t, testClient, senderAddress, appFundTxID.String(), 30*time.Second)
+ a.NoError(err)
+
+ // construct app call
+ appCallTxn, err := testClient.MakeUnsignedAppNoOpTx(
+ uint64(createdAppID), [][]byte{[]byte("first-arg")},
+ nil, nil, nil, nil,
+ )
+ a.NoError(err)
+ appCallTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, 0, appCallTxn)
+ a.NoError(err)
+ appCallTxnSigned, err := testClient.SignTransactionWithWallet(wh, nil, appCallTxn)
+ a.NoError(err)
+
+ resp, err := testClient.SimulateTransactions(v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {
+ Txns: []transactions.SignedTxn{appCallTxnSigned},
+ },
+ },
+ AllowMoreLogging: true,
+ })
+ a.NoError(err)
+
+ var logs [][]byte
+ for i := 0; i < 17; i++ {
+ logs = append(logs, []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
+ }
+
+ budgetAdded, budgetUsed := uint64(700), uint64(40)
+ maxLogSize, maxLogCalls := uint64(65536), uint64(2048)
+
+ expectedResult := v2.PreEncodedSimulateResponse{
+ Version: 2,
+ LastRound: resp.LastRound,
+ EvalOverrides: &model.SimulationEvalOverrides{
+ MaxLogSize: &maxLogSize,
+ MaxLogCalls: &maxLogCalls,
+ },
+ TxnGroups: []v2.PreEncodedSimulateTxnGroupResult{
+ {
+ Txns: []v2.PreEncodedSimulateTxnResult{
+ {
+ Txn: v2.PreEncodedTxInfo{
+ Txn: appCallTxnSigned,
+ Logs: &logs,
+ },
+ AppBudgetConsumed: &budgetUsed,
+ },
+ },
+ AppBudgetAdded: &budgetAdded,
+ AppBudgetConsumed: &budgetUsed,
+ },
+ },
+ }
+ a.Equal(expectedResult, resp)
+}
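The override values asserted here (65536-byte / 2048-call caps) replace the normal AVM limits of 1024 bytes and 32 calls, the same limits quoted by the node error exercised in the shell test further down. Why this program needs the override at all is arithmetic; by our count each logged line is 64 bytes, so treat the constants below as a sketch:

package main

import "fmt"

func main() {
	const defaultMaxLogCalls, defaultMaxLogSize = 32, 1024 // normal AVM limits
	const calls, lineLen = 17, 64                          // what the program above logs
	fmt.Println(calls <= defaultMaxLogCalls)       // true: the call count is fine
	fmt.Println(calls*lineLen > defaultMaxLogSize) // true: 1088 B > 1 KB, so
	// AllowMoreLogging is required; it lifts the caps to 2048 calls / 65536 B.
}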
+
+func TestSimulateWithExtraBudget(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ a := require.New(fixtures.SynchronizedTest(t))
+ var localFixture fixtures.RestClientFixture
+ localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ defer localFixture.Shutdown()
+
+ testClient := localFixture.LibGoalClient
+
+ _, err := testClient.WaitForRound(1)
+ a.NoError(err)
+
+ wh, err := testClient.GetUnencryptedWalletHandle()
+ a.NoError(err)
+ addresses, err := testClient.ListAddresses(wh)
+ a.NoError(err)
+ _, senderAddress := getMaxBalAddr(t, testClient, addresses)
+ if senderAddress == "" {
+ t.Error("no addr with funds")
+ }
+ a.NoError(err)
+
+ toAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
+ closeToAddress := getDestAddr(t, testClient, nil, senderAddress, wh)
+
+ // Ensure these accounts don't exist
+ receiverBalance, err := testClient.GetBalance(toAddress)
+ a.NoError(err)
+ a.Zero(receiverBalance)
+ closeToBalance, err := testClient.GetBalance(closeToAddress)
+ a.NoError(err)
+ a.Zero(closeToBalance)
+
+	// construct a program that consumes a lot of opcode budget
+ prog := `#pragma version 8
+txn ApplicationID
+bz end
+`
+ prog += strings.Repeat(`int 1; pop; `, 700)
+ prog += `end:
+int 1`
+
+ ops, err := logic.AssembleString(prog)
+ a.NoError(err)
+ approval := ops.Program
+ ops, err = logic.AssembleString("#pragma version 8\nint 1")
+ a.NoError(err)
+ clearState := ops.Program
+
+ gl := basics.StateSchema{}
+ lc := basics.StateSchema{}
+
+ // create app
+ appCreateTxn, err := testClient.MakeUnsignedApplicationCallTx(
+ 0, nil, nil, nil,
+ nil, nil, transactions.NoOpOC,
+ approval, clearState, gl, lc, 0,
+ )
+ a.NoError(err)
+ appCreateTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, 0, appCreateTxn)
+ a.NoError(err)
+ // sign and broadcast
+ appCreateTxID, err := testClient.SignAndBroadcastTransaction(wh, nil, appCreateTxn)
+ a.NoError(err)
+ _, err = waitForTransaction(t, testClient, senderAddress, appCreateTxID, 30*time.Second)
+ a.NoError(err)
+
+ // get app ID
+ submittedAppCreateTxn, err := testClient.PendingTransactionInformation(appCreateTxID)
+ a.NoError(err)
+ a.NotNil(submittedAppCreateTxn.ApplicationIndex)
+ createdAppID := basics.AppIndex(*submittedAppCreateTxn.ApplicationIndex)
+ a.Greater(uint64(createdAppID), uint64(0))
+
+ // fund app account
+ appFundTxn, err := testClient.SendPaymentFromWallet(
+ wh, nil, senderAddress, createdAppID.Address().String(),
+ 0, 10_000_000, nil, "", 0, 0,
+ )
+ a.NoError(err)
+ appFundTxID := appFundTxn.ID()
+ _, err = waitForTransaction(t, testClient, senderAddress, appFundTxID.String(), 30*time.Second)
+ a.NoError(err)
+
+ // construct app call
+ appCallTxn, err := testClient.MakeUnsignedAppNoOpTx(
+ uint64(createdAppID), nil, nil, nil, nil, nil,
+ )
+ a.NoError(err)
+ appCallTxn, err = testClient.FillUnsignedTxTemplate(senderAddress, 0, 0, 0, appCallTxn)
+ a.NoError(err)
+ appCallTxnSigned, err := testClient.SignTransactionWithWallet(wh, nil, appCallTxn)
+ a.NoError(err)
+
+ extraBudget := uint64(704)
+ resp, err := testClient.SimulateTransactions(v2.PreEncodedSimulateRequest{
+ TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{
+ {
+ Txns: []transactions.SignedTxn{appCallTxnSigned},
+ },
+ },
+ ExtraOpcodeBudget: extraBudget,
+ })
+ a.NoError(err)
+
+ budgetAdded, budgetUsed := uint64(1404), uint64(1404)
+
+ expectedResult := v2.PreEncodedSimulateResponse{
+ Version: 2,
+ LastRound: resp.LastRound,
+ EvalOverrides: &model.SimulationEvalOverrides{ExtraOpcodeBudget: &extraBudget},
+ TxnGroups: []v2.PreEncodedSimulateTxnGroupResult{
+ {
+ Txns: []v2.PreEncodedSimulateTxnResult{
+ {
+ Txn: v2.PreEncodedTxInfo{Txn: appCallTxnSigned},
+ AppBudgetConsumed: &budgetUsed,
+ },
+ },
+ AppBudgetAdded: &budgetAdded,
+ AppBudgetConsumed: &budgetUsed,
+ },
+ },
+ }
+ a.Equal(expectedResult, resp)
+}
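The 704/1404 pairing is not arbitrary: each app call contributes 700 to the group's opcode budget, and by our reading the assembled program costs exactly 1404, so an extra 704 makes the pool match the cost to the opcode. The accounting, as a hedged sketch:

package main

import "fmt"

func main() {
	const perCallBudget = 700 // base budget contributed by one app call
	const extraBudget = 704   // ExtraOpcodeBudget in the request above
	// Program cost, per our reading of the assembly:
	// intcblock(1) + txn(1) + bz(1) + 700 x (int 1 + pop)(1400) + final int 1(1)
	cost := 1 + 1 + 1 + 700*2 + 1
	fmt.Println(perCallBudget+extraBudget == cost) // true: 1404 == 1404
}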
diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go
index 37d78797a..e0151630a 100644
--- a/test/framework/fixtures/restClientFixture.go
+++ b/test/framework/fixtures/restClientFixture.go
@@ -22,11 +22,11 @@ import (
"time"
"unicode"
+ "github.com/stretchr/testify/require"
+
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
- "github.com/stretchr/testify/require"
-
"github.com/algorand/go-algorand/daemon/algod/api/client"
v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
diff --git a/test/scripts/e2e_subs/access-previous-scratch.sh b/test/scripts/e2e_subs/access-previous-scratch.sh
index 7c519ec43..f8dd54d96 100755
--- a/test/scripts/e2e_subs/access-previous-scratch.sh
+++ b/test/scripts/e2e_subs/access-previous-scratch.sh
@@ -17,7 +17,7 @@ gcmd="goal -w ${WALLET}"
ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
-APPID=$(${gcmd} app create --creator "${ACCOUNT}" --approval-prog=${TEAL}/scratch-rw.teal --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog=${TEAL}/approve-all.teal | grep Created | awk '{ print $6 }')
+APPID=$(${gcmd} app create --creator "${ACCOUNT}" --approval-prog=${TEAL}/scratch-rw.teal --clear-prog=${TEAL}/approve-all.teal | grep Created | awk '{ print $6 }')
# Create app calls
function create_app_call {
diff --git a/test/scripts/e2e_subs/assets-app-b.sh b/test/scripts/e2e_subs/assets-app-b.sh
index 688b77371..168a35c91 100755
--- a/test/scripts/e2e_subs/assets-app-b.sh
+++ b/test/scripts/e2e_subs/assets-app-b.sh
@@ -39,7 +39,7 @@ XFER3=99999
XFER4=11
APP_CREATED_STR='Created app with app index'
-ERR_APP_CL_STR='only clearing out is supported for applications that do not exist'
+ERR_APP_CL_STR='only ClearState is supported for an application'
ERR_APP_NE_STR='application does not exist'
ERR_APP_OI_STR1='has not opted in to application'
ERR_APP_OI_STR2='not opted in to app'
diff --git a/test/scripts/e2e_subs/assets-app.sh b/test/scripts/e2e_subs/assets-app.sh
index 252512951..4c25cd663 100755
--- a/test/scripts/e2e_subs/assets-app.sh
+++ b/test/scripts/e2e_subs/assets-app.sh
@@ -39,7 +39,7 @@ XFER3=99999
XFER4=11
APP_CREATED_STR='Created app with app index'
-ERR_APP_CL_STR='only clearing out is supported for applications that do not exist'
+ERR_APP_CL_STR='only ClearState is supported for an application'
ERR_APP_NE_STR='application does not exist'
ERR_APP_OI_STR1='has not opted in to application'
ERR_APP_OI_STR2='not opted in to app'
diff --git a/test/scripts/e2e_subs/box-search.sh b/test/scripts/e2e_subs/box-search.sh
index 8803e8785..29ed02757 100755
--- a/test/scripts/e2e_subs/box-search.sh
+++ b/test/scripts/e2e_subs/box-search.sh
@@ -20,7 +20,7 @@ ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
# Version 8 clear program
printf '#pragma version 8\nint 1' > "${TEMPDIR}/clear.teal"
-APPID=$(${gcmd} app create --creator "$ACCOUNT" --approval-prog=${TEAL}/boxes.teal --clear-prog "$TEMPDIR/clear.teal" --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
+APPID=$(${gcmd} app create --creator "$ACCOUNT" --approval-prog=${TEAL}/boxes.teal --clear-prog "$TEMPDIR/clear.teal" | grep Created | awk '{ print $6 }')
# Fund the app account 10 algos
APP_ACCOUNT=$(${gcmd} app info --app-id "$APPID" | grep "Application account" | awk '{print $3}')
@@ -38,6 +38,16 @@ EXPECTED="No box found for appid $APPID with name str:not_found"
[ "$BOX_INFO" = "$EXPECTED" ]
+# Confirm that we error for an invalid box name
+BOX_NAME="str:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+RES=$(${gcmd} app call --from "$ACCOUNT" --app-id "$APPID" --box "$BOX_NAME" --app-arg "str:create" --app-arg "$BOX_NAME" 2>&1 || true)
+EXPECTED="invalid : tx.Boxes[0].Name too long, max len 64 bytes"
+
+if [[ "$RES" != *"$EXPECTED" ]]; then
+ date "+${scriptname} unexpected response from goal app call with invalid box name %Y%m%d_%H%M%S"
+ false
+fi
+
# Create several boxes
BOX_NAMES=("str:box1" "str:with spaces" "b64:YmFzZTY0" "b64:AQIDBA==") # b64:YmFzZTY0 == str:base64, b64:AQIDBA== is not unicode
BOX_VALUE="box value"
diff --git a/test/scripts/e2e_subs/e2e-app-abi-arg.sh b/test/scripts/e2e_subs/e2e-app-abi-arg.sh
index c6f719a47..336e892c2 100755
--- a/test/scripts/e2e_subs/e2e-app-abi-arg.sh
+++ b/test/scripts/e2e_subs/e2e-app-abi-arg.sh
@@ -19,7 +19,7 @@ ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal"
PROGRAM=($(${gcmd} clerk compile "${TEMPDIR}/simple.teal"))
-APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${DIR}/tealprogs/app-abi-arg.teal --clear-prog ${TEMPDIR}/simple.teal --global-byteslices 0 --global-ints ${GLOBAL_INTS} --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
+APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${DIR}/tealprogs/app-abi-arg.teal --clear-prog ${TEMPDIR}/simple.teal --global-ints ${GLOBAL_INTS} | grep Created | awk '{ print $6 }')
# Should succeed to opt in with string "optin"
${gcmd} app optin --app-id $APPID --from $ACCOUNT --app-arg 'abi:string:"optin"'
diff --git a/test/scripts/e2e_subs/e2e-app-abi-method.sh b/test/scripts/e2e_subs/e2e-app-abi-method.sh
index ac5d4f592..b41d87b93 100755
--- a/test/scripts/e2e_subs/e2e-app-abi-method.sh
+++ b/test/scripts/e2e_subs/e2e-app-abi-method.sh
@@ -20,7 +20,7 @@ printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple-v2.teal"
printf '#pragma version 3\nint 1' > "${TEMPDIR}/simple-v3.teal"
# Create
-RES=$(${gcmd} app method --method "create(uint64)uint64" --arg "1234" --create --approval-prog ${DIR}/tealprogs/app-abi-method-example.teal --clear-prog ${TEMPDIR}/simple-v2.teal --global-byteslices 0 --global-ints 0 --local-byteslices 1 --local-ints 0 --extra-pages 0 --from $ACCOUNT 2>&1 || true)
+RES=$(${gcmd} app method --method "create(uint64)uint64" --arg "1234" --create --approval-prog ${DIR}/tealprogs/app-abi-method-example.teal --clear-prog ${TEMPDIR}/simple-v2.teal --local-byteslices 1 --from $ACCOUNT 2>&1 || true)
EXPECTED="method create(uint64)uint64 succeeded with output: 2468"
if [[ $RES != *"${EXPECTED}"* ]]; then
date '+app-abi-method-test FAIL the method call to create(uint64)uint64 should not fail %Y%m%d_%H%M%S'
@@ -112,7 +112,7 @@ if [[ $RES != *"${EXPECTED}"* ]]; then
fi
# Foreign reference test during creation
-RES=$(${gcmd} app method --create --approval-prog ${DIR}/tealprogs/app-abi-method-example.teal --clear-prog ${TEMPDIR}/simple-v2.teal --global-byteslices 0 --global-ints 0 --local-byteslices 1 --local-ints 0 --extra-pages 0 --on-completion deleteapplication --method "referenceTest(account,application,account,asset,account,asset,asset,application,application)uint8[9]" --arg KGTOR3F3Q74JP4LB5M3SOCSJ4BOPOKZ2GPSLMLLGCWYWRXZJNN4LYQJXXU --arg 0 --arg $ACCOUNT --arg 10 --arg KGTOR3F3Q74JP4LB5M3SOCSJ4BOPOKZ2GPSLMLLGCWYWRXZJNN4LYQJXXU --arg 11 --arg 10 --arg 20 --arg 21 --app-account 2R5LMPTYLVMWYEG4RPI26PJAM7ARTGUB7LZSONQPGLUWTPOP6LQCJTQZVE --foreign-app 21 --foreign-asset 10 --from $ACCOUNT 2>&1 || true)
+RES=$(${gcmd} app method --create --approval-prog ${DIR}/tealprogs/app-abi-method-example.teal --clear-prog ${TEMPDIR}/simple-v2.teal --local-byteslices 1 --on-completion deleteapplication --method "referenceTest(account,application,account,asset,account,asset,asset,application,application)uint8[9]" --arg KGTOR3F3Q74JP4LB5M3SOCSJ4BOPOKZ2GPSLMLLGCWYWRXZJNN4LYQJXXU --arg 0 --arg $ACCOUNT --arg 10 --arg KGTOR3F3Q74JP4LB5M3SOCSJ4BOPOKZ2GPSLMLLGCWYWRXZJNN4LYQJXXU --arg 11 --arg 10 --arg 20 --arg 21 --app-account 2R5LMPTYLVMWYEG4RPI26PJAM7ARTGUB7LZSONQPGLUWTPOP6LQCJTQZVE --foreign-app 21 --foreign-asset 10 --from $ACCOUNT 2>&1 || true)
EXPECTED="method referenceTest(account,application,account,asset,account,asset,asset,application,application)uint8[9] succeeded with output: [2,0,2,0,2,1,0,1,0]"
if [[ $RES != *"${EXPECTED}"* ]]; then
date '+app-abi-method-test FAIL the creation method call to referenceTest(account,application,account,asset,account,asset,asset,application,application)uint8[9] should not fail %Y%m%d_%H%M%S'
diff --git a/test/scripts/e2e_subs/e2e-app-bootloader.sh b/test/scripts/e2e_subs/e2e-app-bootloader.sh
index 170a96f09..e9c22a65f 100755
--- a/test/scripts/e2e_subs/e2e-app-bootloader.sh
+++ b/test/scripts/e2e_subs/e2e-app-bootloader.sh
@@ -33,7 +33,7 @@ sed -i"" -e "s/TMPL_CLEARSTATE_HASH/${TARGET_HASH}/g" ${TEMPDIR}/bootloader.teal
# Create an app using filled-in bootloader template
printf '#pragma version 2\nint 1' > "${TEMPDIR}/int1.teal"
-APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${TEMPDIR}/bootloader.teal --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog "${TEMPDIR}/int1.teal" | grep Created | awk '{ print $6 }')
+APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${TEMPDIR}/bootloader.teal --global-byteslices 1 --clear-prog "${TEMPDIR}/int1.teal" | grep Created | awk '{ print $6 }')
# Calling app without args and wrong OnCompletion should fail
EXPERROR='rejected by ApprovalProgram'
diff --git a/test/scripts/e2e_subs/e2e-app-delete-clear.sh b/test/scripts/e2e_subs/e2e-app-delete-clear.sh
index 9373c1ba8..2795f869f 100755
--- a/test/scripts/e2e_subs/e2e-app-delete-clear.sh
+++ b/test/scripts/e2e_subs/e2e-app-delete-clear.sh
@@ -20,7 +20,7 @@ PROGRAM_FILE="${TEMPDIR}/simple.teal"
GLOBAL_INTS=2
# Succeed in creating app with on-completion delete
-APPID=$(${gcmd} app create --creator ${ACCOUNT} --on-completion "DeleteApplication" --approval-prog "${PROGRAM_FILE}" --clear-prog "${PROGRAM_FILE}" --global-byteslices 0 --global-ints ${GLOBAL_INTS} --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
+APPID=$(${gcmd} app create --creator ${ACCOUNT} --on-completion "DeleteApplication" --approval-prog "${PROGRAM_FILE}" --clear-prog "${PROGRAM_FILE}" --global-ints ${GLOBAL_INTS} | grep Created | awk '{ print $6 }')
# Check that the app is not created
APPID_CHECK=$(${gcmd} app info --app-id $APPID 2>&1 || true)
EXPERROR="application does not exist"
@@ -30,7 +30,7 @@ if [[ $APPID_CHECK != *"${EXPERROR}"* ]]; then
fi
# Fail if creating app with on-completion clear
-RES=$(${gcmd} app create --creator ${ACCOUNT} --on-completion "ClearState" --approval-prog "${PROGRAM_FILE}" --clear-prog "${PROGRAM_FILE}" --global-byteslices 0 --global-ints ${GLOBAL_INTS} --local-byteslices 0 --local-ints 0 2>&1 || true )
+RES=$(${gcmd} app create --creator ${ACCOUNT} --on-completion "ClearState" --approval-prog "${PROGRAM_FILE}" --clear-prog "${PROGRAM_FILE}" --global-ints ${GLOBAL_INTS} 2>&1 || true )
EXPERROR1='cannot clear state'
EXPERROR2='is not currently opted in'
if [[ $RES != *"${EXPERROR1}"*"${EXPERROR2}"* ]]; then
diff --git a/test/scripts/e2e_subs/e2e-app-extra-pages.sh b/test/scripts/e2e_subs/e2e-app-extra-pages.sh
index 1b9dd779c..b2d3cf870 100755
--- a/test/scripts/e2e_subs/e2e-app-extra-pages.sh
+++ b/test/scripts/e2e_subs/e2e-app-extra-pages.sh
@@ -52,7 +52,7 @@ generate_teal "$APPR_PROG" 4 3072 1 "int 0\nbalance\npop\n"
generate_teal "$BIG_APPR_PROG" 4 4098 1 "int 0\nbalance\npop\n"
# App create fails. Approval program too long
-RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${BIG_TEAL_FILE}" --clear-prog "${BIG_TEAL_FILE}" --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 2>&1 || true)
+RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${BIG_TEAL_FILE}" --clear-prog "${BIG_TEAL_FILE}" --global-byteslices 1 2>&1 || true)
EXPERROR="approval program too long. max len 2048 bytes"
if [[ $RES != *"${EXPERROR}"* ]]; then
date '+app-extra-pages-test FAIL the application creation should fail %Y%m%d_%H%M%S'
@@ -60,7 +60,7 @@ if [[ $RES != *"${EXPERROR}"* ]]; then
fi
# App create fails. Clear state program too long
-RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${SMALL_TEAL_FILE}" --clear-prog "${BIG_TEAL_FILE}" --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 2>&1 || true)
+RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${SMALL_TEAL_FILE}" --clear-prog "${BIG_TEAL_FILE}" --global-byteslices 1 2>&1 || true)
EXPERROR="clear state program too long. max len 2048 bytes"
if [[ $RES != *"${EXPERROR}"* ]]; then
date '+app-extra-pages-test FAIL the application creation should fail %Y%m%d_%H%M%S'
@@ -68,7 +68,7 @@ if [[ $RES != *"${EXPERROR}"* ]]; then
fi
# App create with extra pages, v3 teal
-RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${BIG_TEAL_FILE}" --clear-prog "${BIG_TEAL_FILE}" --extra-pages 3 --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 2>&1 || true)
+RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${BIG_TEAL_FILE}" --clear-prog "${BIG_TEAL_FILE}" --extra-pages 3 --global-byteslices 1 2>&1 || true)
EXPERROR="pc=705 static cost budget of 700 exceeded"
if [[ $RES != *"${EXPERROR}"* ]]; then
date '+app-extra-pages-test FAIL the application creation should fail %Y%m%d_%H%M%S'
@@ -76,7 +76,7 @@ if [[ $RES != *"${EXPERROR}"* ]]; then
fi
# App create with extra pages, v4 teal
-RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${BIG_TEAL_V4_FILE}" --clear-prog "${BIG_TEAL_V4_FILE}" --extra-pages 3 --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 2>&1 || true)
+RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${BIG_TEAL_V4_FILE}" --clear-prog "${BIG_TEAL_V4_FILE}" --extra-pages 3 --global-byteslices 1 2>&1 || true)
EXPERROR="pc=704 dynamic cost budget exceeded, executing intc_0: local program cost was 700"
if [[ $RES != *"${EXPERROR}"* ]]; then
date '+app-extra-pages-test FAIL the application creation should fail %Y%m%d_%H%M%S'
@@ -84,7 +84,7 @@ if [[ $RES != *"${EXPERROR}"* ]]; then
fi
# App create with extra pages, succeeded
-RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${SMALL_TEAL_FILE}" --clear-prog "${SMALL_TEAL_FILE}" --extra-pages 1 --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 2>&1 || true)
+RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${SMALL_TEAL_FILE}" --clear-prog "${SMALL_TEAL_FILE}" --extra-pages 1 --global-byteslices 1 2>&1 || true)
EXP="Created app"
APPID=$(echo $RES | awk '{print $NF}')
if [[ $RES != *"${EXP}"* ]]; then
diff --git a/test/scripts/e2e_subs/e2e-app-simulate.sh b/test/scripts/e2e_subs/e2e-app-simulate.sh
new file mode 100755
index 000000000..7efc7ae3c
--- /dev/null
+++ b/test/scripts/e2e_subs/e2e-app-simulate.sh
@@ -0,0 +1,386 @@
+#!/bin/bash
+
+date '+app-simulate-test start %Y%m%d_%H%M%S'
+
+set -e
+set -x
+set -o pipefail
+set -o nounset
+export SHELLOPTS
+
+WALLET=$1
+
+# Directory of this bash program
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+CONST_TRUE="true"
+CONST_FALSE="false"
+
+# First, try to send an extremely large "request" in the request body.
+# This should fail with a 413 error.
+# Some of our macOS nightly tests fail when the bs (block size) value is
+# given with a capital-letter suffix (e.g. 11M), so just specify it as 1024
+# bytes and allocate 11000 blocks to get an 11MB file.
+dd if=/dev/zero of="${TEMPDIR}/tooLargeRequest.json" bs=1024 count=11000
+RES=$(${gcmd} clerk simulate --request "${TEMPDIR}/tooLargeRequest.json" 2>&1 || true)
+EXPERROR="simulation error: HTTP 413 Request Entity Too Large:"
+if [[ $RES != *"${EXPERROR}"* ]]; then
+ date '+app-simulate-test FAIL the simulate API should fail for request bodies exceeding 10MB %Y%m%d_%H%M%S'
+ false
+fi
+
+##############################################
+# WE FIRST TEST TRANSACTION GROUP SIMULATION #
+##############################################
+
+${gcmd} clerk send -a 10000 -f ${ACCOUNT} -t ${ACCOUNT} -o "${TEMPDIR}/pay1.tx"
+${gcmd} clerk send -a 10000 -f ${ACCOUNT} -t ${ACCOUNT} -o "${TEMPDIR}/pay2.tx"
+
+cat "${TEMPDIR}/pay1.tx" "${TEMPDIR}/pay2.tx" | ${gcmd} clerk group -i - -o "${TEMPDIR}/grouped.tx"
+
+# We test transaction group simulation WITHOUT signatures with default arguments
+RES=$(${gcmd} clerk simulate -t "${TEMPDIR}/grouped.tx")
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_TRUE ]]; then
+    date '+app-simulate-test FAIL the simulation of a transaction group without signatures should fail %Y%m%d_%H%M%S'
+ false
+fi
+
+# We test transaction group simulation WITHOUT signatures, but with allow-empty-signatures enabled
+RES=$(${gcmd} clerk simulate --allow-empty-signatures -t "${TEMPDIR}/grouped.tx")
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_FALSE ]]; then
+ date '+app-simulate-test FAIL the simulation transaction group without signatures should not fail when allow-empty-signatures is true %Y%m%d_%H%M%S'
+ false
+fi
+
+# check the simulation eval overrides reports the right value
+if [[ $(echo "$RES" | jq '."eval-overrides"."allow-empty-signatures"') != $CONST_TRUE ]]; then
+ date '+app-simulate-test FAIL the simulation response should report eval overrides %Y%m%d_%H%M%S'
+ false
+fi
+
+# We then test transaction group simulation WITH signatures
+${gcmd} clerk split -i "${TEMPDIR}/grouped.tx" -o "${TEMPDIR}/grouped.tx"
+
+${gcmd} clerk sign -i "${TEMPDIR}/grouped-0.tx" -o "${TEMPDIR}/grouped-0.stx"
+${gcmd} clerk sign -i "${TEMPDIR}/grouped-1.tx" -o "${TEMPDIR}/grouped-1.stx"
+
+cat "${TEMPDIR}/grouped-0.stx" "${TEMPDIR}/grouped-1.stx" > "${TEMPDIR}/grouped.stx"
+
+RES=$(${gcmd} clerk simulate -t "${TEMPDIR}/grouped.stx")
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_FALSE ]]; then
+    date '+app-simulate-test FAIL simulating the self-pay transaction group should pass %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq 'has("eval-overrides")') != $CONST_FALSE ]]; then
+ date '+app-simulate-test FAIL the simulation response should not report eval overrides %Y%m%d_%H%M%S'
+ false
+fi
+
+# Test creating and using a simulate request object
+${gcmd} clerk simulate -t "${TEMPDIR}/grouped.stx" --request-only-out "${TEMPDIR}/simulateRequest.json"
+
+NUM_GROUPS=$(jq '."txn-groups" | length' < "${TEMPDIR}/simulateRequest.json")
+if [ $NUM_GROUPS -ne 1 ]; then
+ date '+app-simulate-test FAIL should have 1 transaction group in simulate request %Y%m%d_%H%M%S'
+ false
+fi
+
+NUM_TXNS=$(jq '."txn-groups"[0]."txns" | length' < "${TEMPDIR}/simulateRequest.json")
+if [ $NUM_TXNS -ne 2 ]; then
+ date '+app-simulate-test FAIL should have 2 transactions in simulate request %Y%m%d_%H%M%S'
+ false
+fi
+
+RES=$(${gcmd} clerk simulate --request "${TEMPDIR}/simulateRequest.json" | jq '."txn-groups" | any(has("failure-message"))')
+if [[ $RES != $CONST_FALSE ]]; then
+ date '+app-simulate-test FAIL should pass with raw simulate request %Y%m%d_%H%M%S'
+ false
+fi
+
+###############################################
+# WE ALSO TEST OVERSPEND IN TRANSACTION GROUP #
+###############################################
+
+${gcmd} clerk send -a 1000000000000000 -f ${ACCOUNT} -t ${ACCOUNT} -o "${TEMPDIR}/pay1.tx"
+${gcmd} clerk send -a 10000 -f ${ACCOUNT} -t ${ACCOUNT} -o "${TEMPDIR}/pay2.tx"
+
+cat "${TEMPDIR}/pay1.tx" "${TEMPDIR}/pay2.tx" | ${gcmd} clerk group -i - -o "${TEMPDIR}/grouped.tx"
+
+${gcmd} clerk split -i "${TEMPDIR}/grouped.tx" -o "${TEMPDIR}/grouped.tx"
+
+${gcmd} clerk sign -i "${TEMPDIR}/grouped-0.tx" -o "${TEMPDIR}/grouped-0.stx"
+${gcmd} clerk sign -i "${TEMPDIR}/grouped-1.tx" -o "${TEMPDIR}/grouped-1.stx"
+
+cat "${TEMPDIR}/grouped-0.stx" "${TEMPDIR}/grouped-1.stx" > "${TEMPDIR}/grouped.stx"
+
+RES=$(${gcmd} clerk simulate -t "${TEMPDIR}/grouped.stx")
+
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_TRUE ]]; then
+ date '+app-simulate-test FAIL should FAIL for overspending in simulate self pay transaction group %Y%m%d_%H%M%S'
+ false
+fi
+
+OVERSPEND_INFO="overspend"
+
+if [[ $(echo "$RES" | jq '."txn-groups"[0]."failure-message"') != *"$OVERSPEND_INFO"* ]]; then
+ date '+app-simulate-test FAIL first overspending transaction in transaction group should contain message OVERSPEND %Y%m%d_%H%M%S'
+ false
+fi
+
+#######################################################
+# NOW WE TRY TO TEST SIMULATION WITH ABI METHOD CALLS #
+#######################################################
+
+printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple-v2.teal"
+
+# Real Create
+RES=$(${gcmd} app method --method "create(uint64)uint64" --arg "1234" --create --approval-prog ${DIR}/tealprogs/app-abi-method-example.teal --clear-prog ${TEMPDIR}/simple-v2.teal --local-byteslices 1 --from $ACCOUNT 2>&1 || true)
+EXPECTED="method create(uint64)uint64 succeeded with output: 2468"
+if [[ $RES != *"${EXPECTED}"* ]]; then
+ date '+app-simulate-test FAIL the method call to create(uint64)uint64 should not fail %Y%m%d_%H%M%S'
+ false
+fi
+
+APPID=$(echo "$RES" | grep Created | awk '{ print $6 }')
+
+# SIMULATION! empty()void
+${gcmd} app method --method "empty()void" --app-id $APPID --from $ACCOUNT 2>&1 -o "${TEMPDIR}/empty.tx"
+
+# SIMULATE without a signature first
+RES=$(${gcmd} clerk simulate -t "${TEMPDIR}/empty.tx")
+# confirm that without signature, the simulation should fail with default args
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_TRUE ]]; then
+ date '+app-simulate-test FAIL the simulation call to empty()void without signature should not succeed %Y%m%d_%H%M%S'
+ false
+fi
+
+RES=$(${gcmd} clerk simulate --allow-empty-signatures -t "${TEMPDIR}/empty.tx")
+# confirm that without signature, the simulation should pass with allow-empty-signatures
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_FALSE ]]; then
+ date '+app-simulate-test FAIL the simulation call to empty()void without signature should succeed with allow-empty-signatures %Y%m%d_%H%M%S'
+ false
+fi
+
+# check the simulation eval overrides reports the right value
+if [[ $(echo "$RES" | jq '."eval-overrides"."allow-empty-signatures"') != $CONST_TRUE ]]; then
+ date '+app-simulate-test FAIL the simulation call to empty()void without signature should report eval overrides %Y%m%d_%H%M%S'
+ false
+fi
+
+# SIMULATE with a signature
+${gcmd} clerk sign -i "${TEMPDIR}/empty.tx" -o "${TEMPDIR}/empty.stx"
+RES=$(${gcmd} clerk simulate -t "${TEMPDIR}/empty.stx")
+
+# with signature, simulation app-call should succeed
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_FALSE ]]; then
+ date '+app-simulate-test FAIL the simulation call to empty()void should succeed %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq 'has("eval-overrides")') != $CONST_FALSE ]]; then
+ date '+app-simulate-test FAIL the simulation call to empty()void should not report eval overrides %Y%m%d_%H%M%S'
+ false
+fi
+
+#############################################################
+# WE WANT TO FURTHER TEST UNLIMITED LOG IN SIMULATION WORKS #
+#############################################################
+
+TEAL=test/scripts/e2e_subs/tealprogs
+
+printf '#pragma version 6\nint 1' > "${TEMPDIR}/simple-v6.teal"
+
+# NOTE: logs-a-lot.teal contains a method that logs about 1.4kb of data, well over the default 1kb binary limit
+# we test it here to see if simulate's unlimited log option works under goal clerk simulate
+
+RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${TEAL}/logs-a-lot.teal" --clear-prog "${TEMPDIR}/simple-v6.teal" 2>&1 || true)
+EXPSUCCESS='Created app with app index'
+if [[ $RES != *"${EXPSUCCESS}"* ]]; then
+ date '+app-simulate-test FAIL the app creation for logs-a-lot.teal should succeed %Y%m%d_%H%M%S'
+ false
+fi
+
+APPID=$(echo "$RES" | grep Created | awk '{ print $6 }')
+
+# SIMULATION! without unlimited logging, a call to `small_log()void` should succeed
+${gcmd} app method --method "small_log()void" --app-id $APPID --from $ACCOUNT 2>&1 -o "${TEMPDIR}/small_log.tx"
+${gcmd} clerk sign -i "${TEMPDIR}/small_log.tx" -o "${TEMPDIR}/small_log.stx"
+RES=$(${gcmd} clerk simulate -t "${TEMPDIR}/small_log.stx")
+
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_FALSE ]]; then
+ date '+app-simulate-test FAIL the app call to logs-a-lot.teal for small_log()void should not fail %Y%m%d_%H%M%S'
+ false
+fi
+
+EXPECTED_SMALL_LOG='yet another ephemeral log'
+
+if [[ $(echo "$RES" | jq '."txn-groups"[0]."txn-results"[0]."txn-result"."logs"[0] | @base64d') != *"${EXPECTED_SMALL_LOG}"* ]]; then
+ date '+app-simulate-test FAIL the app call to logs-a-lot.teal for small_log()void should have expected logs %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq 'has("eval-overrides")') != $CONST_FALSE ]]; then
+    date '+app-simulate-test FAIL the app call to logs-a-lot.teal without allow-more-logging should not return an eval-overrides field %Y%m%d_%H%M%S'
+ false
+fi
+
+${gcmd} app method --method "unlimited_log_test()void" --app-id $APPID --from $ACCOUNT 2>&1 -o "${TEMPDIR}/big_log.tx"
+${gcmd} clerk sign -i "${TEMPDIR}/big_log.tx" -o "${TEMPDIR}/big_log.stx"
+RES=$(${gcmd} clerk simulate -t "${TEMPDIR}/big_log.stx")
+
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_TRUE ]]; then
+    date '+app-simulate-test FAIL the app call to logs-a-lot.teal for unlimited_log_test()void should fail without unlimited logging %Y%m%d_%H%M%S'
+ false
+fi
+
+EXPECTED_FAILURE='logic eval error: too many log calls in program. up to 32 is allowed.'
+
+if [[ $(echo "$RES" | jq '."txn-groups"[0]."failure-message"') != *"${EXPECTED_FAILURE}"* ]]; then
+    date '+app-simulate-test FAIL the app call to logs-a-lot.teal for unlimited_log_test()void should report a too-many-log-calls failure without the unlimited log option %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq 'has("eval-overrides")') != $CONST_FALSE ]]; then
+    date '+app-simulate-test FAIL the app call to logs-a-lot.teal without allow-more-logging should not return an eval-overrides field %Y%m%d_%H%M%S'
+ false
+fi
+
+# SIMULATION! with unlimited logging, a call to `unlimited_log_test()void` should succeed
+${gcmd} app method --method "unlimited_log_test()void" --app-id $APPID --from $ACCOUNT 2>&1 -o "${TEMPDIR}/big_log.tx"
+${gcmd} clerk sign -i "${TEMPDIR}/big_log.tx" -o "${TEMPDIR}/big_log.stx"
+RES=$(${gcmd} clerk simulate --allow-more-logging -t "${TEMPDIR}/big_log.stx")
+
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_FALSE ]]; then
+    date '+app-simulate-test FAIL the app call to logs-a-lot.teal for unlimited_log_test()void should not fail with unlimited logging %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq '."txn-groups"[0]."failed-at"') != null ]]; then
+    date '+app-simulate-test FAIL the app call to logs-a-lot.teal for unlimited_log_test()void should succeed with the unlimited log option %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq '."eval-overrides"."max-log-size"') -ne 65536 ]]; then
+ date '+app-simulate-test FAIL the app call to logs-a-lot.teal with unlimited log should return max log size 65536 %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq '."eval-overrides"."max-log-calls"') -ne 2048 ]]; then
+ date '+app-simulate-test FAIL the app call to logs-a-lot.teal with unlimited log should return max log calls 2048 %Y%m%d_%H%M%S'
+ false
+fi
+
+EXPECTED_FIRST_LINE_BIG_LOG='The time has come,'
+
+if [[ $(echo "$RES" | jq '."txn-groups"[0]."txn-results"[0]."txn-result"."logs"[0] | @base64d') != *"${EXPECTED_FIRST_LINE_BIG_LOG}"* ]]; then
+ date '+app-simulate-test FAIL the app call to logs-a-lot.teal for unlimited_log_test()void should succeed %Y%m%d_%H%M%S'
+ false
+fi
+
+EXPECTED_LAST_LINE_BIG_LOG='Those of the largest size,'
+
+if [[ $(echo "$RES" | jq '."txn-groups"[0]."txn-results"[0]."txn-result"."logs"[-1] | @base64d') != *"${EXPECTED_LAST_LINE_BIG_LOG}"* ]]; then
+ date '+app-simulate-test FAIL the app call to logs-a-lot.teal for unlimited_log_test()void should succeed %Y%m%d_%H%M%S'
+ false
+fi
+
+############################################################
+# WE WANT TO FURTHER TEST EXTRA BUDGET IN SIMULATION WORKS #
+############################################################
+
+function generate_teal() {
+ FILE=$1
+ VERSION=$2
+ REPETITION=$3
+
+ printf '#pragma version %d\n txn ApplicationID\n bz end\n' $VERSION > "${FILE}"
+
+ # iterating in interval [0, REPETITION - 1]
+ for i in $(seq 0 1 $(expr $REPETITION - 1)); do
+ printf "int 1\npop\n" >> "${FILE}"
+ done
+
+ printf "end:\n int 1\n" >> "${FILE}"
+}
+
+BIG_TEAL_FILE="$TEMPDIR/int-pop-400-cost-a-lot.teal"
+generate_teal "$BIG_TEAL_FILE" 8 400
+
+printf '#pragma version 8\nint 1' > "${TEMPDIR}/simple-v8.teal"
+
+RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog "${BIG_TEAL_FILE}" --clear-prog "${TEMPDIR}/simple-v8.teal" --extra-pages 1 2>&1 || true)
+EXPSUCCESS='Created app with app index'
+if [[ $RES != *"${EXPSUCCESS}"* ]]; then
+ date '+app-simulate-test FAIL the app creation for generated large TEAL should succeed %Y%m%d_%H%M%S'
+ false
+fi
+
+APPID=$(echo "$RES" | grep Created | awk '{ print $6 }')
+
+# SIMULATION! without extra budget should fail direct call
+${gcmd} app call --app-id $APPID --from $ACCOUNT 2>&1 -o "${TEMPDIR}/no-extra-opcode-budget.tx"
+${gcmd} clerk sign -i "${TEMPDIR}/no-extra-opcode-budget.tx" -o "${TEMPDIR}/no-extra-opcode-budget.stx"
+RES=$(${gcmd} clerk simulate -t "${TEMPDIR}/no-extra-opcode-budget.stx")
+
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_TRUE ]]; then
+ date '+app-simulate-test FAIL the app call to generated large TEAL without extra budget should fail %Y%m%d_%H%M%S'
+ false
+fi
+
+EXPECTED_FAILURE='dynamic cost budget exceeded'
+
+if [[ $(echo "$RES" | jq '."txn-groups"[0]."failure-message"') != *"${EXPECTED_FAILURE}"* ]]; then
+ date '+app-simulate-test FAIL the app call to generated large TEAL should fail %Y%m%d_%H%M%S'
+ false
+fi
+
+# SIMULATION! with extra budget should pass direct call
+RES=$(${gcmd} clerk simulate --extra-opcode-budget 200 -t "${TEMPDIR}/no-extra-opcode-budget.stx")
+
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_FALSE ]]; then
+ date '+app-simulate-test FAIL the app call to generated large TEAL with extra budget should pass %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq '."eval-overrides"."extra-opcode-budget"') -ne 200 ]]; then
+ date '+app-simulate-test FAIL the app call to generated large TEAL should have extra-opcode-budget 200 %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq '."txn-groups"[0]."app-budget-added"') -ne 900 ]]; then
+ date '+app-simulate-test FAIL the app call to generated large TEAL should have app-budget-added 900 %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq '."txn-groups"[0]."app-budget-consumed"') -ne 804 ]]; then
+ date '+app-simulate-test FAIL the app call to generated large TEAL should be consuming 804 budget %Y%m%d_%H%M%S'
+ false
+fi
+
+# SIMULATION! with --allow-more-opcode-budget should pass direct call
+RES=$(${gcmd} clerk simulate --allow-more-opcode-budget -t "${TEMPDIR}/no-extra-opcode-budget.stx")
+
+if [[ $(echo "$RES" | jq '."txn-groups" | any(has("failure-message"))') != $CONST_FALSE ]]; then
+ date '+app-simulate-test FAIL the app call to generated large TEAL with extra budget should pass %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq '."eval-overrides"."extra-opcode-budget"') -ne 320000 ]]; then
+ date '+app-simulate-test FAIL the app call to generated large TEAL should have extra-opcode-budget 320000 %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq '."txn-groups"[0]."app-budget-added"') -ne 320700 ]]; then
+ date '+app-simulate-test FAIL the app call to generated large TEAL should have app-budget-added 320700 %Y%m%d_%H%M%S'
+ false
+fi
+
+if [[ $(echo "$RES" | jq '."txn-groups"[0]."app-budget-consumed"') -ne 804 ]]; then
+ date '+app-simulate-test FAIL the app call to generated large TEAL should be consuming 804 budget %Y%m%d_%H%M%S'
+ false
+fi
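The budget numbers this script asserts follow the same accounting as the Go test above: each app call contributes 700, --extra-opcode-budget adds on top, and --allow-more-opcode-budget maxes the extra out at 320000 (that ceiling is taken from the script's own assertions, not from a consensus constant we have verified). A sketch:

package main

import "fmt"

func main() {
	// Cost of the 400-repetition program, per our reading of the assembly:
	cost := 1 + 1 + 1 + 400*2 + 1 // = 804, matching app-budget-consumed
	fmt.Println(cost)
	fmt.Println(700 + 200)    // --extra-opcode-budget 200  -> app-budget-added 900
	fmt.Println(700 + 320000) // --allow-more-opcode-budget -> app-budget-added 320700
}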
diff --git a/test/scripts/e2e_subs/e2e-app-stateful-global.sh b/test/scripts/e2e_subs/e2e-app-stateful-global.sh
index eac38d313..4b892116d 100755
--- a/test/scripts/e2e_subs/e2e-app-stateful-global.sh
+++ b/test/scripts/e2e_subs/e2e-app-stateful-global.sh
@@ -51,7 +51,7 @@ ${gcmd} app delete --app-id $APPID --app-arg "str:hello" --from $ACCOUNT
# Check should fail since we can't find program to execute
RES=$(${gcmd} app call --app-id $APPID --app-arg "str:check" --app-arg "str:bar" --from $ACCOUNT 2>&1 || true)
-EXPERROR='only clearing out is supported'
+EXPERROR='only ClearState is supported'
if [[ $RES != *"${EXPERROR}"* ]]; then
date '+app-create-test FAIL app call should fail if app has been deleted %Y%m%d_%H%M%S'
false
diff --git a/test/scripts/e2e_subs/e2e-app-stateful-local.sh b/test/scripts/e2e_subs/e2e-app-stateful-local.sh
index 4f9201499..83859bcc9 100755
--- a/test/scripts/e2e_subs/e2e-app-stateful-local.sh
+++ b/test/scripts/e2e_subs/e2e-app-stateful-local.sh
@@ -59,7 +59,7 @@ ${gcmd} app delete --app-id $APPID --app-arg "str:hello" --from $ACCOUNT
# Check should fail since we can't find program to execute
RES=$(${gcmd} app call --app-id $APPID --app-arg "str:check" --app-arg "str:bar" --from $ACCOUNT 2>&1 || true)
-EXPERROR='only clearing out is supported'
+EXPERROR='only ClearState is supported'
if [[ $RES != *"${EXPERROR}"* ]]; then
date '+app-create-test FAIL app call should fail if app has been deleted %Y%m%d_%H%M%S'
false
diff --git a/test/scripts/e2e_subs/e2e-app-x-app-reads.sh b/test/scripts/e2e_subs/e2e-app-x-app-reads.sh
index 9b42e9e26..86b61339e 100755
--- a/test/scripts/e2e_subs/e2e-app-x-app-reads.sh
+++ b/test/scripts/e2e_subs/e2e-app-x-app-reads.sh
@@ -21,7 +21,7 @@ APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${DIR}/tealprogs
# Creating an app that attempts to read APPID's global state without setting
# foreignapps should fail
-EXPERR="invalid App reference 1"
+EXPERR="App index 1 beyond txn.ForeignApps"
RES=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${DIR}/tealprogs/xappreads.teal --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog <(printf '#pragma version 2\nint 1') 2>&1 || true)
if [[ $RES != *"$EXPERR"* ]]; then
date '+x-app-reads FAIL expected disallowed foreign global read to fail %Y%m%d_%H%M%S'
diff --git a/test/scripts/e2e_subs/goal-app-create-state-defaults.sh b/test/scripts/e2e_subs/goal-app-create-state-defaults.sh
new file mode 100755
index 000000000..c77faa64d
--- /dev/null
+++ b/test/scripts/e2e_subs/goal-app-create-state-defaults.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+scriptname="goal-app-create-state-defaults"
+date "+${scriptname} start %Y%m%d_%H%M%S"
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+# Directory of this bash program
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal"
+
+# Check goal flags --global-byteslices, --global-ints, --local-byteslices, --local-ints. We want to
+# ensure that omitting these flags has the same effect as setting them to 0.
+
+APP_CREATE_TXN_NO_STATE_FILE="${TEMPDIR}/create_no_state.txn"
+APP_CREATE_TXN_FULLY_SPECIFIED_FILE="${TEMPDIR}/create_fully_specified.txn"
+
+# Checks for 'goal app create'
+
+# Passing a note is needed because goal will sometimes try to customize a note
+# to avoid duplicate txns
+
+${gcmd} app create --note "hello" --creator "${ACCOUNT}" --approval-prog "${TEMPDIR}/simple.teal" --clear-prog "${TEMPDIR}/simple.teal" --out "${APP_CREATE_TXN_NO_STATE_FILE}"
+APP_CREATE_TXN_NO_STATE=$(msgpacktool -d < "${APP_CREATE_TXN_NO_STATE_FILE}")
+
+FIRSTVALID=$(echo $APP_CREATE_TXN_NO_STATE | jq ".txn.fv")
+
+# --firstvalid is passed on subsequent transactions to ensure they have
+# the same validity range as the first txn
+
+${gcmd} app create --note "hello" --creator "${ACCOUNT}" --approval-prog "${TEMPDIR}/simple.teal" --clear-prog "${TEMPDIR}/simple.teal" --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 --firstvalid $FIRSTVALID --out "${APP_CREATE_TXN_FULLY_SPECIFIED_FILE}"
+APP_CREATE_TXN_FULLY_SPECIFIED=$(msgpacktool -d < "${APP_CREATE_TXN_FULLY_SPECIFIED_FILE}")
+
+if [ "$APP_CREATE_TXN_FULLY_SPECIFIED" != "$APP_CREATE_TXN_NO_STATE" ]; then
+ date "+${scriptname} transactions made with 'goal app create' are not equal %Y%m%d_%H%M%S"
+ false
+fi
+
+# Checks for 'goal app method --create'
+
+${gcmd} app method --create --note "hello" --from "${ACCOUNT}" --method "create(uint64)uint64" --arg "1234" --approval-prog "${TEMPDIR}/simple.teal" --clear-prog "${TEMPDIR}/simple.teal" --out "${APP_CREATE_TXN_NO_STATE_FILE}"
+APP_CREATE_TXN_NO_STATE=$(msgpacktool -d < "${APP_CREATE_TXN_NO_STATE_FILE}")
+
+FIRSTVALID=$(echo $APP_CREATE_TXN_NO_STATE | jq ".txn.fv")
+
+${gcmd} app method --create --note "hello" --from "${ACCOUNT}" --method "create(uint64)uint64" --arg "1234" --approval-prog "${TEMPDIR}/simple.teal" --clear-prog "${TEMPDIR}/simple.teal" --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 --firstvalid $FIRSTVALID --out "${APP_CREATE_TXN_FULLY_SPECIFIED_FILE}"
+APP_CREATE_TXN_FULLY_SPECIFIED=$(msgpacktool -d < "${APP_CREATE_TXN_FULLY_SPECIFIED_FILE}")
+
+if [ "$APP_CREATE_TXN_FULLY_SPECIFIED" != "$APP_CREATE_TXN_NO_STATE" ]; then
+ date "+${scriptname} transactions made with 'goal method --create' are not equal %Y%m%d_%H%M%S"
+ false
+fi
+
+date "+${scriptname} OK %Y%m%d_%H%M%S"
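The equality checks above work because omitting the state-schema flags and passing explicit zeros encode to the same transaction: the zero value of basics.StateSchema already means no global or local entries. A tiny sketch of that equivalence:

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/basics"
)

func main() {
	var omitted basics.StateSchema // zero value: 0 uints, 0 byte slices
	explicit := basics.StateSchema{NumUint: 0, NumByteSlice: 0}
	fmt.Println(omitted == explicit) // true: identical schemas, identical encoding
}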
diff --git a/test/scripts/e2e_subs/sectok-app.sh b/test/scripts/e2e_subs/sectok-app.sh
index 5ed3e2d5e..8e87c57c3 100755
--- a/test/scripts/e2e_subs/sectok-app.sh
+++ b/test/scripts/e2e_subs/sectok-app.sh
@@ -40,7 +40,7 @@ XFER4=11
VERY_LATE=9999999999
APP_CREATED_STR='Created app with app index'
-ERR_APP_CL_STR='only clearing out is supported for applications that do not exist'
+ERR_APP_CL_STR='only ClearState is supported for an application'
ERR_APP_NE_STR='application does not exist'
ERR_APP_OI_STR1='has not opted in to application'
ERR_APP_OI_STR2='not opted in to app'
diff --git a/test/scripts/e2e_subs/shared-resources.py b/test/scripts/e2e_subs/shared-resources.py
new file mode 100755
index 000000000..d4c1c9594
--- /dev/null
+++ b/test/scripts/e2e_subs/shared-resources.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+
+import os
+import sys
+import algosdk.encoding as enc
+import algosdk.future.transaction as txn
+from goal import Goal
+
+from datetime import datetime
+
+stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+print(f"{os.path.basename(sys.argv[0])} start {stamp}")
+
+goal = Goal(sys.argv[1], autosend=True)
+
+joe = goal.new_account()
+
+txinfo, err = goal.pay(goal.account, joe, amt=500_000)
+assert not err, err
+
+putTeal = """
+#pragma version 8
+txn ApplicationID
+bz end
+
+txn ApplicationArgs 0
+byte 0x1032
+txn ApplicationArgs 1
+btoi
+app_local_put
+
+end: int 1
+"""
+
+txinfo, err = goal.app_create(joe, goal.assemble(putTeal),
+ local_schema=(2, 0))
+assert not err, err
+app_id = txinfo['application-index']
+assert app_id
+
+print("goal.account: ", goal.account)
+print("joe: ", joe)
+
+goal.autosend = False
+grp1 = goal.app_call(goal.account, app_id,
+ on_complete=txn.OnComplete.OptInOC,
+ app_args=[enc.decode_address(goal.account), 10])
+grp2 = goal.app_call(joe, app_id,
+ app_args=[enc.decode_address(goal.account), 20])
+[grp1_info, grp2_info], err = goal.send_group([grp1, grp2])
+
+# Won't work, because v8 can't modify an account (goal.account) that
+# isn't in the `grp2` txn
+assert err
+assert "invalid Account reference "+goal.account in str(err)
+
+# Now, upgrade program to same thing, but v9
+
+optin = goal.app_call(joe, app_id,
+ on_complete=txn.OnComplete.OptInOC,
+ app_args=[enc.decode_address(joe), 40])
+optin_info, err = goal.send(optin)
+assert not err, err
+
+putTealV9 = putTeal.replace("#pragma version 8", "#pragma version 9")
+
+update = goal.app_call(joe, app_id,
+ on_complete=txn.OnComplete.UpdateApplicationOC,
+ approval_program=goal.assemble(putTealV9),
+ clear_program=goal.assemble(putTealV9),
+ app_args=[enc.decode_address(joe), 50])
+update_info, err = goal.send(update)
+assert not err, err
+
+# Works now, because a v9 program is allowed to modify a "non-local"
+# account. Under the covers, the txn gets a "SharedAccts" array, and
+# the index points there. But the REST API hides that.
+grp1 = goal.app_call(goal.account, app_id,
+ on_complete=txn.OnComplete.OptInOC,
+ app_args=[enc.decode_address(goal.account), 60])
+grp2 = goal.app_call(joe, app_id,
+ app_args=[enc.decode_address(goal.account), 70])
+[grp1_info, grp2_info], err = goal.send_group([grp1, grp2])
+assert not err, err
+
+# Both txns should have a local-state-delta that modified
+# goal.account, even though that would have been impossible in v8
+# because goal.account does not appear in the `grp2` transaction.
+assert len(grp1_info["local-state-delta"]) == 1
+assert grp1_info["local-state-delta"][0]["address"] == goal.account
+assert grp1_info["local-state-delta"][0]["delta"][0]["value"]["uint"] == 60
+
+assert len(grp2_info["local-state-delta"]) == 1
+assert grp2_info["local-state-delta"][0]["address"] == goal.account
+assert grp2_info["local-state-delta"][0]["delta"][0]["value"]["uint"] == 70
+
+print(f"{os.path.basename(sys.argv[0])} OK {stamp}")
diff --git a/test/scripts/e2e_subs/teal-creatable-id.sh b/test/scripts/e2e_subs/teal-creatable-id.sh
index 50dff53b3..b461e11ee 100755
--- a/test/scripts/e2e_subs/teal-creatable-id.sh
+++ b/test/scripts/e2e_subs/teal-creatable-id.sh
@@ -17,7 +17,7 @@ gcmd="goal -w ${WALLET}"
ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
-APPID=$(${gcmd} app create --creator "${ACCOUNT}" --approval-prog=${TEAL}/check_creatable_id.teal --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog=${TEAL}/approve-all.teal --app-arg=str:skipcreation | grep Created | awk '{ print $6 }')
+APPID=$(${gcmd} app create --creator "${ACCOUNT}" --approval-prog=${TEAL}/check_creatable_id.teal --clear-prog=${TEAL}/approve-all.teal --app-arg=str:skipcreation | grep Created | awk '{ print $6 }')
# ==============================
# > Asset and application test
@@ -30,7 +30,7 @@ ${gcmd} asset create --creator "${ACCOUNT}" --total 1000 --unitname "" --assetur
${gcmd} app call --app-id="$APPID" --from="$ACCOUNT" --app-arg=str:skipcreation --app-arg=int:0 --out "$TEMPDIR/unsigned_asset_check_app_call.txn"
# Create app transaction
-${gcmd} app create --creator "${ACCOUNT}" --approval-prog=${TEAL}/approve-all.teal --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog=${TEAL}/approve-all.teal --out "$TEMPDIR/unsigned_app_create.txn"
+${gcmd} app create --creator "${ACCOUNT}" --approval-prog=${TEAL}/approve-all.teal --clear-prog=${TEAL}/approve-all.teal --out "$TEMPDIR/unsigned_app_create.txn"
# App call transaction to check app creatable ID
${gcmd} app call --app-id="$APPID" --from="$ACCOUNT" --app-arg=str:skipcreation --app-arg=int:2 --out "$TEMPDIR/unsigned_app_check_app_call.txn"
diff --git a/test/scripts/e2e_subs/tealprogs/logs-a-lot.teal b/test/scripts/e2e_subs/tealprogs/logs-a-lot.teal
new file mode 100644
index 000000000..5618275d6
--- /dev/null
+++ b/test/scripts/e2e_subs/tealprogs/logs-a-lot.teal
@@ -0,0 +1,141 @@
+#pragma version 6
+txn NumAppArgs
+int 0
+==
+bnz main_l6
+txna ApplicationArgs 0
+method "small_log()void"
+==
+bnz main_l5
+txna ApplicationArgs 0
+method "unlimited_log_test()void"
+==
+bnz main_l4
+err
+main_l4:
+txn OnCompletion
+int NoOp
+==
+txn ApplicationID
+int 0
+!=
+&&
+assert
+callsub unlimitedlogtest_1
+int 1
+return
+main_l5:
+txn OnCompletion
+int NoOp
+==
+txn ApplicationID
+int 0
+!=
+&&
+assert
+callsub smalllog_0
+int 1
+return
+main_l6:
+txn OnCompletion
+int NoOp
+==
+bnz main_l8
+err
+main_l8:
+txn ApplicationID
+int 0
+==
+assert
+int 1
+return
+
+// small_log
+smalllog_0:
+byte "yet another ephemeral log"
+log
+retsub
+
+// unlimited_log_test
+unlimitedlogtest_1:
+byte "The time has come,"
+log
+byte "To talk of many things"
+log
+byte "Of shoes--and ships--and sealing-wax--"
+log
+byte "Of cabbages--and kings--"
+log
+byte "And why the sea is boiling hot--"
+log
+byte "And whether pigs have wings."
+log
+byte "But wait a bit, the Oysters cried,"
+log
+byte "Before we have our chat"
+log
+byte "For some of us are out of breath,"
+log
+byte "And all of us are fat!"
+log
+byte "No hurry! said the Carpenter."
+log
+byte "They thanked him much for that."
+log
+byte "A loaf of bread, the Walrus said,"
+log
+byte "Is what we chiefly need"
+log
+byte "Pepper and vinegar besides"
+log
+byte "Are very good indeed--"
+log
+byte "Now if youre ready, Oysters dear,"
+log
+byte "We can begin to feed."
+log
+byte "But not on us! the Oysters cried,"
+log
+byte "Turning a little blue."
+log
+byte "After such kindness, that would be"
+log
+byte "A dismal thing to do!"
+log
+byte "The night is fine, the Walrus said."
+log
+byte "Do you admire the view?"
+log
+byte "It was so kind of you to come!"
+log
+byte "And you are very nice!"
+log
+byte "The Carpenter said nothing but"
+log
+byte "Cut us another slice"
+log
+byte "I wish you were not quite so deaf--"
+log
+byte "Ive had to ask you twice!"
+log
+byte "It seems a shame, the Walrus said,"
+log
+byte "To play them such a trick,"
+log
+byte "After weve brought them out so far,"
+log
+byte "And made them trot so quick!"
+log
+byte "The Carpenter said nothing but"
+log
+byte "The butters spread too thick!"
+log
+byte "I weep for you, the Walrus said"
+log
+byte "I deeply sympathize."
+log
+byte "With sobs and tears he sorted out"
+log
+byte "Those of the largest size,"
+log
+retsub \ No newline at end of file
diff --git a/test/testdata/consensus/catchpointtestingprotocol.json b/test/testdata/consensus/catchpointtestingprotocol.json
index c591dce17..842238868 100644
--- a/test/testdata/consensus/catchpointtestingprotocol.json
+++ b/test/testdata/consensus/catchpointtestingprotocol.json
@@ -99,6 +99,7 @@
"SupportTransactionLeases": true,
"SupportTxGroups": true,
"TxnCounter": true,
+ "EnableCatchpointsWithSPContexts": true,
"UpgradeThreshold": 9000,
"UpgradeVoteRounds": 10000
}
diff --git a/test/testdata/deployednettemplates/recipes/custom/configs/nonPartNode.json b/test/testdata/deployednettemplates/recipes/custom/configs/nonPartNode.json
index 5b0a52d9d..42d491b2e 100644
--- a/test/testdata/deployednettemplates/recipes/custom/configs/nonPartNode.json
+++ b/test/testdata/deployednettemplates/recipes/custom/configs/nonPartNode.json
@@ -1,5 +1,5 @@
{
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0 }"
}
diff --git a/test/testdata/deployednettemplates/recipes/custom/configs/relay.json b/test/testdata/deployednettemplates/recipes/custom/configs/relay.json
index 25bb6b5a2..2f621f1a2 100644
--- a/test/testdata/deployednettemplates/recipes/custom/configs/relay.json
+++ b/test/testdata/deployednettemplates/recipes/custom/configs/relay.json
@@ -7,5 +7,5 @@
"TelemetryURI": "{{TelemetryURI}}",
"EnableMetrics": true,
"MetricsURI": "{{MetricsURI}}",
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"<network>.algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"EnableRuntimeMetrics\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }"
}
diff --git a/test/testdata/nettemplates/RichAccountStateProof.json b/test/testdata/nettemplates/RichAccountStateProof.json
index e908ec807..578fb0e3c 100644
--- a/test/testdata/nettemplates/RichAccountStateProof.json
+++ b/test/testdata/nettemplates/RichAccountStateProof.json
@@ -2,7 +2,7 @@
"Genesis": {
"NetworkName": "tbd",
"ConsensusProtocol": "test-fast-stateproofs",
- "LastPartKeyRound": 100,
+ "LastPartKeyRound": 500,
"Wallets": [
{ "Name": "richWallet", "Stake": 39, "Online": true },
{ "Name": "Wallet1", "Stake": 20, "Online": true },
diff --git a/test/testdata/nettemplates/StateProof.json b/test/testdata/nettemplates/StateProof.json
index 1194af643..97f416560 100644
--- a/test/testdata/nettemplates/StateProof.json
+++ b/test/testdata/nettemplates/StateProof.json
@@ -2,7 +2,7 @@
"Genesis": {
"NetworkName": "tbd",
"ConsensusProtocol": "test-fast-stateproofs",
- "LastPartKeyRound": 100,
+ "LastPartKeyRound": 500,
"Wallets": [
{ "Name": "Wallet0", "Stake": 20, "Online": true },
{ "Name": "Wallet1", "Stake": 20, "Online": true },
diff --git a/test/testdata/nettemplates/ThreeNodesWithRichAcct.json b/test/testdata/nettemplates/ThreeNodesWithRichAcct.json
new file mode 100644
index 000000000..548da60f6
--- /dev/null
+++ b/test/testdata/nettemplates/ThreeNodesWithRichAcct.json
@@ -0,0 +1,48 @@
+{
+ "Genesis": {
+ "NetworkName": "tbd",
+ "ConsensusProtocol": "catchpointtestingprotocol",
+ "LastPartKeyRound": 3000,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 80,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 10,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 10,
+ "Online": true
+ }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Primary",
+ "IsRelay": true,
+ "Wallets": [
+ { "Name": "Wallet1",
+ "ParticipationOnly": false }
+ ]
+ },
+ {
+ "Name": "Node1",
+ "Wallets": [
+ { "Name": "Wallet2",
+ "ParticipationOnly": false }
+ ]
+ },
+ {
+ "Name": "Node2",
+ "Wallets": [
+ { "Name": "Wallet3",
+ "ParticipationOnly": false }
+ ]
+ }
+ ]
+}
diff --git a/tools/README.md b/tools/README.md
index b8394ac77..f7ecf655a 100644
--- a/tools/README.md
+++ b/tools/README.md
@@ -2,19 +2,23 @@
Various tools and utilities that don't have a better place to go.
-### debug
+## debug
Tools for debugging algod. These were really useful before launch when we spent a lot of time analyzing node behavior, but aren't needed as much recently.
-### misc
+## misc
Small tools that are useful in niche situations.
-### network
+## network
This is a go package used by some of the CLI commands.
-### TEAL
+## TEAL
Some of the earliest tools we built for working with TEAL programs. A lot of these are workarounds for things that no longer need to be worked around, but these are still nice examples of what TEAL can do.
+## x-repo-types
+
+A tool for checking that `go-algorand` types are serialized identically to
+their purported clones in other Algorand repositories.
diff --git a/tools/block-generator/README.md b/tools/block-generator/README.md
new file mode 100644
index 000000000..93e62d546
--- /dev/null
+++ b/tools/block-generator/README.md
@@ -0,0 +1,161 @@
+# Block Generator
+
+This tool is used for testing Conduit import performance. It does this by generating synthetic blocks and serving them through mocked versions of the Algod REST API endpoints that Conduit uses.
+
+## Scenario Configuration
+
+Block generator uses a YAML config file to describe the composition of each randomly generated block. There are three levels of configuration:
+
+1. Setup
+2. Transaction type distribution
+3. Transaction type specific configuration
+
+At the time of writing, the block generator supports **payment** and **asset** transactions. The settings should be largely self-explanatory. Distributions are specified as fractions of 1.0, and each group of fractions must sum to 1.0.
+
+Here is an example which uses all of the current options. Notice that the synthetic blocks are not required to follow algod limits; in this case the block size is set to 19999:
+
+```yml
+name: "Mixed (19,999)"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 19999
+
+# transaction distribution
+tx_pay_fraction: 0.3
+tx_asset_fraction: 0.7
+
+# payment config
+pay_acct_create_fraction: 0.02
+pay_xfer_fraction: 0.98
+
+# asset config
+asset_create_fraction: 0.001
+asset_optin_fraction: 0.1
+asset_close_fraction: 0.05
+asset_xfer_fraction: 0.849
+asset_delete_fraction: 0
+```
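+
+Each group of fractions (the transaction distribution, the payment config, and the asset config) is validated with a small tolerance rather than an exact equality check. The sketch below mirrors the generator's own `sumIsCloseToOne` helper and shows the example's groups passing:
+
+```go
+package main
+
+import "fmt"
+
+// sumIsCloseToOne reports whether the fractions sum to 1.0 within a 0.01
+// tolerance, which absorbs float32 rounding in hand-written configs.
+func sumIsCloseToOne(numbers ...float32) bool {
+	var sum float32
+	for _, num := range numbers {
+		sum += num
+	}
+	return sum > 0.99 && sum < 1.01
+}
+
+func main() {
+	fmt.Println(sumIsCloseToOne(0.3, 0.7))                    // tx distribution: true
+	fmt.Println(sumIsCloseToOne(0.001, 0.1, 0.05, 0.849, 0)) // asset config: true
+}
+```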
+
+## Modes
+
+The block generator can run in one of two _modes_:
+
+1. standalone **daemon**
+2. test suite **runner**
+
+### daemon
+
+In standalone daemon mode, a block-generator process starts and exposes the mock algod endpoints for **/genesis** and **/v2/blocks/{block}**. If you choose to query them manually, blocks can only be fetched sequentially, because the daemon generates a pseudorandom stream of transactions and advances its internal state after each one. A sketch of driving these endpoints from Go appears after the help output below.
+
+Here is the help output for **daemon**:
+
+```bash
+~$ ./block-generator daemon -h
+Start the generator daemon in standalone mode.
+
+Usage:
+ block-generator daemon [flags]
+
+Flags:
+ -c, --config string Specify the block configuration yaml file.
+ -h, --help help for daemon
+ -p, --port uint Port to start the server at. (default 4010)
+```
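+
+If you'd rather drive the daemon programmatically, the minimal Go sketch below walks the rounds in order, as required. It assumes a daemon is already listening on the default port 4010, and it trims error handling for brevity:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	base := "http://localhost:4010" // default daemon port
+
+	// Blocks can only be fetched sequentially, so walk the rounds in order.
+	for round := 0; round <= 5; round++ {
+		resp, err := http.Get(fmt.Sprintf("%s/v2/blocks/%d", base, round))
+		if err != nil {
+			panic(err)
+		}
+		blk, _ := io.ReadAll(resp.Body)
+		resp.Body.Close()
+		fmt.Printf("round %d: %d bytes of msgpack\n", round, len(blk))
+	}
+}
+```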
+
+### runner
+
+The runner mode is well suited for running the same set of tests consistently across many scenarios and different releases. It automates the process by starting the **daemon** with many different configurations, managing a postgres database, and running a separate Conduit process configured to use both.
+
+The results of the testing are written to the directory specified by the **--report-directory** option, and include many different metrics. In addition to the report, the Conduit log is written to this directory. The files are named according to the scenario file, and end in "report" or "log".
+
+Here is an example report from running with a test duration of "1h":
+
+```
+test_duration_seconds:3600
+test_duration_actual_seconds:3600.056457
+transaction_pay_total:30024226
+transaction_pay_create_total:614242
+early_average_import_time_sec:2.13
+early_cumulative_import_time_sec:1083.26
+early_average_imported_tx_per_block:19999.00
+early_cumulative_imported_tx_per_block:10179491
+early_average_block_upload_time_sec:NaN
+early_cumulative_block_upload_time_sec:0.00
+early_average_postgres_eval_time_sec:0.33
+early_cumulative_postgres_eval_time_sec:167.41
+early_imported_round:509
+early_overall_transactions_per_second:9397.09
+early_uptime_seconds:3600.06
+final_average_import_time_sec:2.35
+final_cumulative_import_time_sec:3602.62
+final_average_imported_tx_per_block:19999.00
+final_cumulative_imported_tx_per_block:30598470
+final_average_block_upload_time_sec:NaN
+final_cumulative_block_upload_time_sec:0.00
+final_average_postgres_eval_time_sec:0.33
+final_cumulative_postgres_eval_time_sec:507.38
+final_imported_round:1530
+final_overall_transactions_per_second:8493.40
+final_uptime_seconds:3600.06
+```
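+
+Since the report is a plain `key:value` listing with one metric per line, it is easy to consume programmatically. The sketch below is illustrative only, with `scenario.report` standing in for whichever report file the runner produced:
+
+```go
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strings"
+)
+
+func main() {
+	f, err := os.Open("scenario.report") // placeholder report file name
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+
+	metrics := make(map[string]string)
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		// Each line has the shape "metric_name:value".
+		if key, value, ok := strings.Cut(scanner.Text(), ":"); ok {
+			metrics[key] = value
+		}
+	}
+	fmt.Println("final TPS:", metrics["final_overall_transactions_per_second"])
+}
+```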
+
+Here is the help output for **runner**:
+
+```bash
+~$ ./block-generator runner -h
+Run an automated test suite using the block-generator daemon and a provided conduit binary. Results are captured to a specified output directory.
+
+Usage:
+ block-generator runner [flags]
+
+Flags:
+ -i, --conduit-binary string Path to conduit binary.
+ --cpuprofile string Path where conduit writes its CPU profile.
+ -h, --help help for runner
+ -k, --keep-data-dir If set the validator will not delete the data directory after tests complete.
+ -l, --log-level string LogLevel to use when starting conduit. [panic, fatal, error, warn, info, debug, trace] (default "error")
+ -p, --metrics-port uint Port to start the metrics server at. (default 9999)
+ -c, --postgres-connection-string string Postgres connection string.
+ -r, --report-directory string Location to place test reports.
+ --reset If set any existing report directory will be deleted before running tests.
+ -s, --scenario string Directory containing scenarios, or specific scenario file.
+ -d, --test-duration duration Duration to use for each scenario. (default 5m0s)
+ --validate If set the validator will run after test-duration has elapsed to verify data is correct. An extra line in each report indicates validator success or failure.
+```
+
+## Example Run using Conduit and Postgres in **bash** via `run_runner.sh`
+
+A typical **runner** scenario involves:
+
+* a [scenario configuration](#scenario-configuration) file, e.g. [test_config.yml](./test_config.yml)
+* access to a `conduit` binary to query the block generator's mock Algod endpoint and ingest the synthetic blocks
+* a datastore, such as a postgres database, to collect `conduit`'s output
+* a `conduit` config file to define its import/export behavior
+
+`run_runner.sh` makes the following choices for the previous bullet points:
+
+* it accepts any scenario as its second argument, defaulting to [test_config.yml](./test_config.yml) when none is provided (a scenario with a lifetime of ~30 seconds)
+* it imports through a mock Algod running on port 11112 (the port the runner exposes)
+* it sets up a dockerized postgres database to receive conduit's output
+* it configures `conduit` for these specs using [this config template](./runner/template/conduit.yml.tmpl)
+
+### Sample Run
+
+First you'll need a `conduit` binary. For example, you can follow the [developer portal's instructions](https://developer.algorand.org/docs/get-details/conduit/GettingStarted/#installation), or run `go build .` inside the `cmd/conduit` directory after downloading the `conduit` repo.
+
+Assume you've navigated to the `tools/block-generator` directory of
+the `go-algorand` repo, and:
+
+* saved the conduit binary to `tools/block-generator/conduit`
+* created a block generator scenario config at `tools/block-generator/scenario.yml`
+
+Then you can execute the following command to run the scenario:
+
+```sh
+./run_runner.sh ./conduit scenario.yml
+```
+
+### Scenario Report
+
+If all goes well, the run will generate a directory `tmp/OUTPUT_RUN_RUNNER_TEST`
+and in that directory you can see the statistics of the run in `scenario.report`.
diff --git a/tools/block-generator/core/commands.go b/tools/block-generator/core/commands.go
new file mode 100644
index 000000000..af64c8a5d
--- /dev/null
+++ b/tools/block-generator/core/commands.go
@@ -0,0 +1,35 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "github.com/algorand/go-algorand/tools/block-generator/generator"
+ "github.com/algorand/go-algorand/tools/block-generator/runner"
+ "github.com/spf13/cobra"
+)
+
+// BlockGenerator related cobra commands, ready to be executed or included as subcommands.
+var BlockGenerator *cobra.Command
+
+func init() {
+ BlockGenerator = &cobra.Command{
+ Use: `block-generator`,
+ Short: `Block generator testing tools.`,
+ }
+ BlockGenerator.AddCommand(runner.RunnerCmd)
+ BlockGenerator.AddCommand(generator.DaemonCmd)
+}
diff --git a/tools/block-generator/generator/daemon.go b/tools/block-generator/generator/daemon.go
new file mode 100644
index 000000000..36c49cac5
--- /dev/null
+++ b/tools/block-generator/generator/daemon.go
@@ -0,0 +1,52 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "fmt"
+ "math/rand"
+
+ "github.com/spf13/cobra"
+)
+
+// DaemonCmd starts a block generator daemon.
+var DaemonCmd *cobra.Command
+
+func init() {
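+	// Use a fixed seed so repeated runs generate an identical block stream.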
+ rand.Seed(12345)
+
+ var configFile string
+ var port uint64
+
+ DaemonCmd = &cobra.Command{
+ Use: "daemon",
+ Short: "Start the generator daemon in standalone mode.",
+ Run: func(cmd *cobra.Command, args []string) {
+ addr := fmt.Sprintf(":%d", port)
+ srv, _ := MakeServer(configFile, addr)
+ err := srv.ListenAndServe()
+ if err != nil {
+ panic(err)
+ }
+ },
+ }
+
+ DaemonCmd.Flags().StringVarP(&configFile, "config", "c", "", "Specify the block configuration yaml file.")
+ DaemonCmd.Flags().Uint64VarP(&port, "port", "p", 4010, "Port to start the server at.")
+
+ DaemonCmd.MarkFlagRequired("config")
+}
diff --git a/tools/block-generator/generator/generate.go b/tools/block-generator/generator/generate.go
new file mode 100644
index 000000000..48df2b06c
--- /dev/null
+++ b/tools/block-generator/generator/generate.go
@@ -0,0 +1,870 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "time"
+
+ cconfig "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/committee"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/rpcs"
+)
+
+// TxTypeID is the transaction type.
+type TxTypeID string
+
+const (
+ genesis TxTypeID = "genesis"
+
+ // Payment Tx IDs
+ paymentTx TxTypeID = "pay"
+ paymentAcctCreateTx TxTypeID = "pay_create"
+ assetTx TxTypeID = "asset"
+ //keyRegistrationTx TxTypeID = "keyreg"
+ //applicationCallTx TxTypeID = "appl"
+
+ // Asset Tx IDs
+ assetCreate TxTypeID = "asset_create"
+ assetOptin TxTypeID = "asset_optin"
+ assetXfer TxTypeID = "asset_xfer"
+ assetClose TxTypeID = "asset_close"
+ assetDestroy TxTypeID = "asset_destroy"
+
+ assetTotal = uint64(100000000000000000)
+
+ consensusTimeMilli int64 = 4500
+ startingTxnCounter uint64 = 1000
+)
+
+// GenerationConfig defines the tunable parameters for block generation.
+type GenerationConfig struct {
+ Name string `yaml:"name"`
+ NumGenesisAccounts uint64 `yaml:"genesis_accounts"`
+ GenesisAccountInitialBalance uint64 `yaml:"genesis_account_balance"`
+
+ // Block generation
+ TxnPerBlock uint64 `yaml:"tx_per_block"`
+
+ // TX Distribution
+ PaymentTransactionFraction float32 `yaml:"tx_pay_fraction"`
+ AssetTransactionFraction float32 `yaml:"tx_asset_fraction"`
+
+ // Payment configuration
+ PaymentNewAccountFraction float32 `yaml:"pay_acct_create_fraction"`
+ PaymentFraction float32 `yaml:"pay_xfer_fraction"`
+
+ // Asset configuration
+ AssetCreateFraction float32 `yaml:"asset_create_fraction"`
+ AssetDestroyFraction float32 `yaml:"asset_destroy_fraction"`
+ AssetOptinFraction float32 `yaml:"asset_optin_fraction"`
+ AssetCloseFraction float32 `yaml:"asset_close_fraction"`
+ AssetXferFraction float32 `yaml:"asset_xfer_fraction"`
+}
+
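+// sumIsCloseToOne reports whether the given fractions sum to 1.0 within a 0.01 tolerance.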
+func sumIsCloseToOne(numbers ...float32) bool {
+ var sum float32
+ for _, num := range numbers {
+ sum += num
+ }
+ return sum > 0.99 && sum < 1.01
+}
+
+// MakeGenerator initializes the Generator object.
+func MakeGenerator(dbround uint64, bkGenesis bookkeeping.Genesis, config GenerationConfig) (Generator, error) {
+ if !sumIsCloseToOne(config.PaymentTransactionFraction, config.AssetTransactionFraction) {
+ return nil, fmt.Errorf("transaction distribution ratios should equal 1")
+ }
+
+ if !sumIsCloseToOne(config.PaymentNewAccountFraction, config.PaymentFraction) {
+ return nil, fmt.Errorf("payment configuration ratios should equal 1")
+ }
+
+ if !sumIsCloseToOne(config.AssetCreateFraction, config.AssetDestroyFraction, config.AssetOptinFraction, config.AssetCloseFraction, config.AssetXferFraction) {
+ return nil, fmt.Errorf("asset configuration ratios should equal 1")
+ }
+
+ var proto protocol.ConsensusVersion = "future"
+ gen := &generator{
+ config: config,
+ protocol: proto,
+ params: cconfig.Consensus[proto],
+ genesis: bkGenesis,
+ genesisHash: [32]byte{},
+ genesisID: "blockgen-test",
+ prevBlockHash: "",
+ round: 0,
+ txnCounter: startingTxnCounter,
+ timestamp: 0,
+ rewardsLevel: 0,
+ rewardsResidue: 0,
+ rewardsRate: 0,
+ rewardsRecalculationRound: 0,
+ reportData: make(map[TxTypeID]TxData),
+ roundOffset: dbround,
+ }
+
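+	// Sentinel bytes stand in for real addresses; the generator avoids real crypto.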
+ gen.feeSink[31] = 1
+ gen.rewardsPool[31] = 2
+ gen.genesisHash[31] = 3
+
+ // if genesis is provided
+ if bkGenesis.Network != "" {
+ gen.genesisID = bkGenesis.ID()
+ gen.genesisHash = bkGenesis.Hash()
+ }
+
+ gen.initializeAccounting()
+ gen.initializeLedger()
+ for _, val := range getTransactionOptions() {
+ switch val {
+ case paymentTx:
+ gen.transactionWeights = append(gen.transactionWeights, config.PaymentTransactionFraction)
+ case assetTx:
+ gen.transactionWeights = append(gen.transactionWeights, config.AssetTransactionFraction)
+ }
+ }
+
+ for _, val := range getPaymentTxOptions() {
+ switch val {
+ case paymentTx:
+ gen.payTxWeights = append(gen.payTxWeights, config.PaymentFraction)
+ case paymentAcctCreateTx:
+ gen.payTxWeights = append(gen.payTxWeights, config.PaymentNewAccountFraction)
+ }
+ }
+
+ for _, val := range getAssetTxOptions() {
+ switch val {
+ case assetCreate:
+ gen.assetTxWeights = append(gen.assetTxWeights, config.AssetCreateFraction)
+ case assetDestroy:
+ gen.assetTxWeights = append(gen.assetTxWeights, config.AssetDestroyFraction)
+ case assetOptin:
+ gen.assetTxWeights = append(gen.assetTxWeights, config.AssetOptinFraction)
+ case assetXfer:
+ gen.assetTxWeights = append(gen.assetTxWeights, config.AssetXferFraction)
+ case assetClose:
+ gen.assetTxWeights = append(gen.assetTxWeights, config.AssetCloseFraction)
+ }
+ }
+
+ return gen, nil
+}
+
+// Generator is the interface needed to generate blocks.
+type Generator interface {
+ WriteReport(output io.Writer) error
+ WriteGenesis(output io.Writer) error
+ WriteBlock(output io.Writer, round uint64) error
+ WriteAccount(output io.Writer, accountString string) error
+ WriteStatus(output io.Writer) error
+ WriteDeltas(output io.Writer, round uint64) error
+ Accounts() <-chan basics.Address
+ Stop()
+}
+
+type generator struct {
+ config GenerationConfig
+
+ // payment transaction metadata
+ numPayments uint64
+
+ // Number of algorand accounts
+ numAccounts uint64
+
+ // Block stuff
+ round uint64
+ txnCounter uint64
+ prevBlockHash string
+ timestamp int64
+ protocol protocol.ConsensusVersion
+ params cconfig.ConsensusParams
+ genesis bookkeeping.Genesis
+ genesisID string
+ genesisHash crypto.Digest
+
+ // Rewards stuff
+ feeSink basics.Address
+ rewardsPool basics.Address
+ rewardsLevel uint64
+ rewardsResidue uint64
+ rewardsRate uint64
+ rewardsRecalculationRound uint64
+
+ // balances for all accounts. To avoid crypto and reduce storage, accounts are faked.
+ // The account is based on the index into the balances array.
+ balances []uint64
+
+ // assets is a minimal representation of the asset holdings, it doesn't
+ // include the frozen state.
+ assets []*assetData
+ // pendingAssets is used to hold newly created assets so that they are not used before
+ // being created.
+ pendingAssets []*assetData
+
+ transactionWeights []float32
+ payTxWeights []float32
+ assetTxWeights []float32
+
+ // Reporting information from transaction type to data
+ reportData Report
+
+ // ledger
+ ledger *ledger.Ledger
+
+ roundOffset uint64
+}
+
+type assetData struct {
+ assetID uint64
+ creator uint64
+ name string
+ // Holding at index 0 is the creator.
+ holdings []*assetHolding
+ // Set of holders in the holdings array for easy reference.
+ holders map[uint64]*assetHolding
+}
+
+type assetHolding struct {
+ acctIndex uint64
+ balance uint64
+}
+
+// Report is the generation report.
+type Report map[TxTypeID]TxData
+
+// TxData is the generator report data.
+type TxData struct {
+ GenerationTime time.Duration `json:"generation_time_milli"`
+ GenerationCount uint64 `json:"num_generated"`
+}
+
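+// track pairs a transaction type ID with the current time, for use with recordData via defer.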
+func track(id TxTypeID) (TxTypeID, time.Time) {
+ return id, time.Now()
+}
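+
+// recordData accumulates the generation count and elapsed time for a transaction type.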
+func (g *generator) recordData(id TxTypeID, start time.Time) {
+ data := g.reportData[id]
+ data.GenerationCount++
+ data.GenerationTime += time.Since(start)
+ g.reportData[id] = data
+}
+
+func (g *generator) WriteReport(output io.Writer) error {
+ return json.NewEncoder(output).Encode(g.reportData)
+}
+
+func (g *generator) WriteStatus(output io.Writer) error {
+ response := model.NodeStatusResponse{
+ LastRound: g.round + g.roundOffset,
+ }
+ return json.NewEncoder(output).Encode(response)
+}
+
+func (g *generator) WriteGenesis(output io.Writer) error {
+ defer g.recordData(track(genesis))
+
+ // return user provided genesis
+ if g.genesis.Network != "" {
+ _, err := output.Write(protocol.EncodeJSON(g.genesis))
+ return err
+ }
+
+ // return synthetic genesis
+ var allocations []bookkeeping.GenesisAllocation
+ for i := uint64(0); i < g.config.NumGenesisAccounts; i++ {
+ addr := indexToAccount(i)
+ allocations = append(allocations, bookkeeping.GenesisAllocation{
+ Address: addr.String(),
+ State: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: g.config.GenesisAccountInitialBalance},
+ },
+ })
+ }
+ // Also add the rewards pool account with minimum balance. Without it, the evaluator
+ // crashes.
+ allocations = append(allocations, bookkeeping.GenesisAllocation{
+ Address: g.rewardsPool.String(),
+ Comment: "RewardsPool",
+ State: basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: g.params.MinBalance},
+ Status: basics.NotParticipating,
+ },
+ })
+
+ gen := bookkeeping.Genesis{
+ SchemaID: "v1",
+ Network: "generated-network",
+ Proto: g.protocol,
+ Allocation: allocations,
+ RewardsPool: g.rewardsPool.String(),
+ FeeSink: g.feeSink.String(),
+ Timestamp: g.timestamp,
+ }
+
+ _, err := output.Write(protocol.EncodeJSON(gen))
+ return err
+}
+
+func getTransactionOptions() []interface{} {
+ return []interface{}{paymentTx, assetTx}
+}
+
+func (g *generator) generateTransaction(round uint64, intra uint64) (transactions.SignedTxn, transactions.ApplyData, error) {
+ selection, err := weightedSelection(g.transactionWeights, getTransactionOptions(), paymentTx)
+ if err != nil {
+ return transactions.SignedTxn{}, transactions.ApplyData{}, err
+ }
+
+ switch selection {
+ case paymentTx:
+ return g.generatePaymentTxn(round, intra)
+ case assetTx:
+ return g.generateAssetTxn(round, intra)
+ default:
+ return transactions.SignedTxn{}, transactions.ApplyData{}, fmt.Errorf("no generator available for %s", selection)
+ }
+}
+
+func (g *generator) txnForRound(round uint64) uint64 {
+ // There are no transactions in the 0th round
+ if round == 0 {
+ return 0
+ }
+ return g.config.TxnPerBlock
+}
+
+// finishRound tells the generator it can apply any pending state.
+func (g *generator) finishRound(txnCount uint64) {
+ g.txnCounter += txnCount
+
+ g.timestamp += consensusTimeMilli
+ g.round++
+
+ // Apply pending assets...
+ g.assets = append(g.assets, g.pendingAssets...)
+ g.pendingAssets = nil
+}
+
+// WriteBlock generates a block full of new transactions and writes it to the writer.
+func (g *generator) WriteBlock(output io.Writer, round uint64) error {
+ if round < g.roundOffset {
+ return fmt.Errorf("cannot generate block for round %d, already in database", round)
+ }
+ if round-g.roundOffset != g.round {
+ return fmt.Errorf("generator only supports sequential block access. Expected %d but received request for %d", g.round+g.roundOffset, round)
+ }
+ numTxnForBlock := g.txnForRound(g.round)
+
+ // return genesis block. offset round for non-empty database
+ if round-g.roundOffset == 0 {
+ // write the msgpack bytes for a block
+ block, _, _ := g.ledger.BlockCert(basics.Round(round - g.roundOffset))
+ // return the block with the requested round number
+ block.BlockHeader.Round = basics.Round(round)
+ encodedblock := rpcs.EncodedBlockCert{Block: block}
+ blk := protocol.EncodeMsgp(&encodedblock)
+ // write the msgpack bytes for a block
+ _, err := output.Write(blk)
+ if err != nil {
+ return err
+ }
+ g.finishRound(numTxnForBlock)
+ return nil
+ }
+
+ header := bookkeeping.BlockHeader{
+ Round: basics.Round(g.round),
+ Branch: bookkeeping.BlockHash{},
+ Seed: committee.Seed{},
+ TxnCommitments: bookkeeping.TxnCommitments{NativeSha512_256Commitment: crypto.Digest{}},
+ TimeStamp: g.timestamp,
+ GenesisID: g.genesisID,
+ GenesisHash: g.genesisHash,
+ RewardsState: bookkeeping.RewardsState{
+ FeeSink: g.feeSink,
+ RewardsPool: g.rewardsPool,
+ RewardsLevel: 0,
+ RewardsRate: 0,
+ RewardsResidue: 0,
+ RewardsRecalculationRound: 0,
+ },
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: g.protocol,
+ },
+ UpgradeVote: bookkeeping.UpgradeVote{},
+ TxnCounter: g.txnCounter + numTxnForBlock,
+ StateProofTracking: nil,
+ }
+
+ // Generate the transactions
+ transactions := make([]transactions.SignedTxnInBlock, 0, numTxnForBlock)
+
+ for i := uint64(0); i < numTxnForBlock; i++ {
+ txn, ad, err := g.generateTransaction(g.round, i)
+ if err != nil {
+ panic(fmt.Sprintf("failed to generate transaction: %v\n", err))
+ }
+ stib, err := header.EncodeSignedTxn(txn, ad)
+ if err != nil {
+ panic(fmt.Sprintf("failed to encode transaction: %v\n", err))
+ }
+ transactions = append(transactions, stib)
+ }
+
+ if numTxnForBlock != uint64(len(transactions)) {
+ panic("Unexpected number of transactions.")
+ }
+
+ cert := rpcs.EncodedBlockCert{
+ Block: bookkeeping.Block{
+ BlockHeader: header,
+ Payset: transactions,
+ },
+ Certificate: agreement.Certificate{},
+ }
+
+ err := g.ledger.AddBlock(cert.Block, cert.Certificate)
+ if err != nil {
+ return err
+ }
+ // return the block with the requested round number
+ cert.Block.BlockHeader.Round = basics.Round(round)
+	block := protocol.EncodeMsgp(&cert)
+ // write the msgpack bytes for a block
+ _, err = output.Write(block)
+ if err != nil {
+ return err
+ }
+ g.finishRound(numTxnForBlock)
+ return nil
+}
+
+// WriteDeltas returns the deltas for the given round's payset.
+func (g *generator) WriteDeltas(output io.Writer, round uint64) error {
+ // the first generated round has no statedelta.
+ if round-g.roundOffset == 0 {
+ data, _ := encode(protocol.CodecHandle, ledgercore.StateDelta{})
+ _, err := output.Write(data)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+ delta, err := g.ledger.GetStateDeltaForRound(basics.Round(round - g.roundOffset))
+ if err != nil {
+ return fmt.Errorf("err getting state delta for round %d: %w", round, err)
+ }
+ // msgp encode deltas
+ data, err := encode(protocol.CodecHandle, delta)
+ if err != nil {
+ return err
+ }
+ _, err = output.Write(data)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// initializeAccounting creates the genesis accounts.
+func (g *generator) initializeAccounting() {
+ if g.config.NumGenesisAccounts == 0 {
+ panic("Number of genesis accounts must be > 0.")
+ }
+
+ g.numPayments = 0
+ g.numAccounts = g.config.NumGenesisAccounts
+ for i := uint64(0); i < g.config.NumGenesisAccounts; i++ {
+ g.balances = append(g.balances, g.config.GenesisAccountInitialBalance)
+ }
+}
+
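+// signTxn wraps a transaction in a SignedTxn with a fake signature; the generator skips real crypto for speed.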
+func signTxn(txn transactions.Transaction) transactions.SignedTxn {
+ stxn := transactions.SignedTxn{
+ Sig: crypto.Signature{},
+ Msig: crypto.MultisigSig{},
+ Lsig: transactions.LogicSig{},
+ Txn: txn,
+ AuthAddr: basics.Address{},
+ }
+
+ // TODO: Would it be useful to generate a random signature?
+ stxn.Sig[32] = 50
+
+ return stxn
+}
+
+func getPaymentTxOptions() []interface{} {
+ return []interface{}{paymentTx, paymentAcctCreateTx}
+}
+
+// generatePaymentTxn creates a new payment transaction. The sender is always a genesis account, the receiver is random,
+// or a new account.
+func (g *generator) generatePaymentTxn(round uint64, intra uint64) (transactions.SignedTxn, transactions.ApplyData, error) {
+ selection, err := weightedSelection(g.payTxWeights, getPaymentTxOptions(), paymentTx)
+ if err != nil {
+ return transactions.SignedTxn{}, transactions.ApplyData{}, err
+ }
+ return g.generatePaymentTxnInternal(selection.(TxTypeID), round, intra)
+}
+
+func (g *generator) generatePaymentTxnInternal(selection TxTypeID, round uint64, intra uint64) (transactions.SignedTxn, transactions.ApplyData, error) {
+ defer g.recordData(track(selection))
+ minBal := g.params.MinBalance
+
+ // default amount
+ amount := uint64(1)
+
+ // Select a receiver
+ var receiveIndex uint64
+ switch selection {
+ case paymentTx:
+ receiveIndex = rand.Uint64() % g.numAccounts
+ case paymentAcctCreateTx:
+		// give new accounts extra algos so they can send other transactions
+ amount = minBal * 100
+ g.balances = append(g.balances, 0)
+ receiveIndex = g.numAccounts
+ g.numAccounts++
+ }
+ total := amount + g.params.MinTxnFee
+
+ // Select a sender from genesis account
+ sendIndex := g.numPayments % g.config.NumGenesisAccounts
+ if g.balances[sendIndex] < (total + minBal) {
+ fmt.Printf("\n\ngeneratePaymentTxnInternal(): the sender account does not have enough algos for the transfer. idx %d, payment number %d\n\n", sendIndex, g.numPayments)
+ os.Exit(1)
+ }
+
+ sender := indexToAccount(sendIndex)
+ receiver := indexToAccount(receiveIndex)
+
+ g.balances[sendIndex] -= total
+ g.balances[receiveIndex] += amount
+
+ g.numPayments++
+
+ txn := g.makePaymentTxn(g.makeTxnHeader(sender, round, intra), receiver, amount, basics.Address{})
+ return signTxn(txn), transactions.ApplyData{}, nil
+}
+
+func getAssetTxOptions() []interface{} {
+ return []interface{}{assetCreate, assetDestroy, assetOptin, assetXfer, assetClose}
+}
+
+func (g *generator) generateAssetTxnInternal(txType TxTypeID, round uint64, intra uint64) (actual TxTypeID, txn transactions.Transaction) {
+ return g.generateAssetTxnInternalHint(txType, round, intra, 0, nil)
+}
+
+func (g *generator) generateAssetTxnInternalHint(txType TxTypeID, round uint64, intra uint64, hintIndex uint64, hint *assetData) (actual TxTypeID, txn transactions.Transaction) {
+ actual = txType
+ // If there are no assets the next operation needs to be a create.
+ if len(g.assets) == 0 {
+ actual = assetCreate
+ }
+
+ numAssets := uint64(len(g.assets))
+ var senderIndex uint64
+ if actual == assetCreate {
+ numAssets = uint64(len(g.assets)) + uint64(len(g.pendingAssets))
+ senderIndex = numAssets % g.config.NumGenesisAccounts
+ senderAcct := indexToAccount(senderIndex)
+
+ total := assetTotal
+ assetID := g.txnCounter + intra + 1
+ assetName := fmt.Sprintf("asset #%d", assetID)
+ txn = g.makeAssetCreateTxn(g.makeTxnHeader(senderAcct, round, intra), total, false, assetName)
+ // Compute asset ID and initialize holdings
+ holding := assetHolding{
+ acctIndex: senderIndex,
+ balance: total,
+ }
+ a := assetData{
+ name: assetName,
+ assetID: assetID,
+ creator: senderIndex,
+ holdings: []*assetHolding{&holding},
+ holders: map[uint64]*assetHolding{senderIndex: &holding},
+ }
+
+ g.pendingAssets = append(g.pendingAssets, &a)
+ } else {
+ assetIndex := rand.Uint64() % numAssets
+ asset := g.assets[assetIndex]
+ if hint != nil {
+ assetIndex = hintIndex
+ asset = hint
+ }
+
+ switch actual {
+ case assetDestroy:
+ // delete asset
+
+ // If the creator doesn't have all of them, close instead
+ if asset.holdings[0].balance != assetTotal {
+ return g.generateAssetTxnInternalHint(assetClose, round, intra, assetIndex, asset)
+ }
+
+ senderIndex = asset.creator
+ creator := indexToAccount(senderIndex)
+ txn = g.makeAssetDestroyTxn(g.makeTxnHeader(creator, round, intra), asset.assetID)
+
+ // Remove asset by moving the last element to the deleted index then trimming the slice.
+ g.assets[assetIndex] = g.assets[numAssets-1]
+ g.assets = g.assets[:numAssets-1]
+ case assetOptin:
+ // select a random account from asset to optin
+
+ // If every account holds the asset, close instead of optin
+ if uint64(len(asset.holdings)) == g.numAccounts {
+ return g.generateAssetTxnInternalHint(assetClose, round, intra, assetIndex, asset)
+ }
+
+ // look for an account that does not hold the asset
+ exists := true
+ for exists {
+ senderIndex = rand.Uint64() % g.numAccounts
+ exists = asset.holders[senderIndex] != nil
+ }
+ account := indexToAccount(senderIndex)
+ txn = g.makeAssetAcceptanceTxn(g.makeTxnHeader(account, round, intra), asset.assetID)
+
+ holding := assetHolding{
+ acctIndex: senderIndex,
+ balance: 0,
+ }
+ asset.holdings = append(asset.holdings, &holding)
+ asset.holders[senderIndex] = &holding
+ case assetXfer:
+ // send from creator (holder[0]) to another random holder (same address is valid)
+
+ // If there aren't enough assets to close one, optin an account instead
+ if len(asset.holdings) == 1 {
+ return g.generateAssetTxnInternalHint(assetOptin, round, intra, assetIndex, asset)
+ }
+
+ senderIndex = asset.holdings[0].acctIndex
+ sender := indexToAccount(senderIndex)
+
+ receiverArrayIndex := (rand.Uint64() % (uint64(len(asset.holdings)) - uint64(1))) + uint64(1)
+ receiver := indexToAccount(asset.holdings[receiverArrayIndex].acctIndex)
+ amount := uint64(10)
+
+ txn = g.makeAssetTransferTxn(g.makeTxnHeader(sender, round, intra), receiver, amount, basics.Address{}, asset.assetID)
+
+ if asset.holdings[0].balance < amount {
+ fmt.Printf("\n\ncreator doesn't have enough funds for asset %d\n\n", asset.assetID)
+ os.Exit(1)
+ }
+ if g.balances[asset.holdings[0].acctIndex] < g.params.MinTxnFee {
+ fmt.Printf("\n\ncreator doesn't have enough funds for transaction %d\n\n", asset.assetID)
+ os.Exit(1)
+ }
+
+ asset.holdings[0].balance -= amount
+ asset.holdings[receiverArrayIndex].balance += amount
+ case assetClose:
+ // select a holder of a random asset to close out
+ // If there aren't enough assets to close one, optin an account instead
+ if len(asset.holdings) == 1 {
+ return g.generateAssetTxnInternalHint(
+ assetOptin, round, intra, assetIndex, asset)
+ }
+
+ numHoldings := uint64(len(asset.holdings))
+ closeIndex := (rand.Uint64() % (numHoldings - 1)) + uint64(1)
+ senderIndex = asset.holdings[closeIndex].acctIndex
+ sender := indexToAccount(senderIndex)
+
+ closeToAcctIndex := asset.holdings[0].acctIndex
+ closeToAcct := indexToAccount(closeToAcctIndex)
+
+ txn = g.makeAssetTransferTxn(
+ g.makeTxnHeader(sender, round, intra), closeToAcct, 0, closeToAcct, asset.assetID)
+
+ asset.holdings[0].balance += asset.holdings[closeIndex].balance
+
+ // Remove asset by moving the last element to the deleted index then trimming the slice.
+ asset.holdings[closeIndex] = asset.holdings[numHoldings-1]
+ asset.holdings = asset.holdings[:numHoldings-1]
+ delete(asset.holders, senderIndex)
+ default:
+ }
+ }
+
+ if indexToAccount(senderIndex) != txn.Sender {
+ fmt.Printf("failed to properly set sender index.")
+ os.Exit(1)
+ }
+
+ if g.balances[senderIndex] < txn.Fee.ToUint64() {
+ fmt.Printf("\n\nthe sender account does not have enough algos for the transfer. idx %d, asset transaction type %v, num %d\n\n", senderIndex, actual, g.reportData[actual].GenerationCount)
+ os.Exit(1)
+ }
+ g.balances[senderIndex] -= txn.Fee.ToUint64()
+ return
+}
+
+func (g *generator) generateAssetTxn(round uint64, intra uint64) (transactions.SignedTxn, transactions.ApplyData, error) {
+ start := time.Now()
+ selection, err := weightedSelection(g.assetTxWeights, getAssetTxOptions(), assetXfer)
+ if err != nil {
+ return transactions.SignedTxn{}, transactions.ApplyData{}, err
+ }
+
+ actual, txn := g.generateAssetTxnInternal(selection.(TxTypeID), round, intra)
+ defer g.recordData(actual, start)
+
+ if txn.Type == "" {
+ fmt.Println("Empty asset transaction.")
+ os.Exit(1)
+ }
+
+ return signTxn(txn), transactions.ApplyData{}, nil
+}
+
+func (g *generator) initializeLedger() {
+ genBal := convertToGenesisBalances(g.balances)
+ // add rewards pool with min balance
+ genBal[g.rewardsPool] = basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: g.params.MinBalance},
+ }
+ bal := bookkeeping.MakeGenesisBalances(genBal, g.feeSink, g.rewardsPool)
+ block, err := bookkeeping.MakeGenesisBlock(g.protocol, bal, g.genesisID, g.genesisHash)
+ if err != nil {
+ fmt.Printf("error making genesis: %v\n.", err)
+ os.Exit(1)
+ }
+ var prefix string
+ if g.genesisID == "" {
+ prefix = "block-generator"
+ } else {
+ prefix = g.genesisID
+ }
+ l, err := ledger.OpenLedger(logging.Base(), prefix, true, ledgercore.InitState{
+ Block: block,
+ Accounts: bal.Balances,
+ GenesisHash: g.genesisHash,
+ }, cconfig.GetDefaultLocal())
+ if err != nil {
+ fmt.Printf("error initializing ledger: %v\n.", err)
+ os.Exit(1)
+ }
+ g.ledger = l
+}
+
+// Stop cleans up allocated resources.
+func (g *generator) Stop() {
+ g.ledger.Close()
+}
+
+func (g *generator) WriteAccount(output io.Writer, accountString string) error {
+ addr, err := basics.UnmarshalChecksumAddress(accountString)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal address: %w", err)
+ }
+
+ idx := accountToIndex(addr)
+
+ // Asset Holdings
+ assets := make([]model.AssetHolding, 0)
+ createdAssets := make([]model.Asset, 0)
+ for _, a := range g.assets {
+ // holdings
+ if holding := a.holders[idx]; holding != nil {
+ assets = append(assets, model.AssetHolding{
+ Amount: holding.balance,
+ AssetID: a.assetID,
+ IsFrozen: false,
+ })
+ }
+ // creator
+ if len(a.holdings) > 0 && a.holdings[0].acctIndex == idx {
+ nameBytes := []byte(a.name)
+ asset := model.Asset{
+ Index: a.assetID,
+ Params: model.AssetParams{
+ Creator: accountString,
+ Decimals: 0,
+ Clawback: &accountString,
+ Freeze: &accountString,
+ Manager: &accountString,
+ Reserve: &accountString,
+ Name: &a.name,
+ NameB64: &nameBytes,
+ Total: assetTotal,
+ },
+ }
+ asset.Params.DefaultFrozen = new(bool)
+ *(asset.Params.DefaultFrozen) = false
+ createdAssets = append(createdAssets, asset)
+ }
+ }
+
+ data := model.Account{
+ Address: accountString,
+ Amount: g.balances[idx],
+ AmountWithoutPendingRewards: g.balances[idx],
+ AppsLocalState: nil,
+ AppsTotalExtraPages: nil,
+ AppsTotalSchema: nil,
+ Assets: &assets,
+ AuthAddr: nil,
+ CreatedApps: nil,
+ CreatedAssets: &createdAssets,
+ Participation: nil,
+ PendingRewards: 0,
+ RewardBase: nil,
+ Rewards: 0,
+ Round: g.round - 1,
+ SigType: nil,
+ Status: "Offline",
+ }
+
+ return json.NewEncoder(output).Encode(data)
+}
+
+// Accounts is used in the runner to generate a list of addresses.
+func (g *generator) Accounts() <-chan basics.Address {
+ results := make(chan basics.Address, 10)
+ go func() {
+ defer close(results)
+ for i := uint64(0); i < g.numAccounts; i++ {
+ results <- indexToAccount(i)
+ }
+ }()
+ return results
+}
diff --git a/tools/block-generator/generator/generate_test.go b/tools/block-generator/generator/generate_test.go
new file mode 100644
index 000000000..dddcf1758
--- /dev/null
+++ b/tools/block-generator/generator/generate_test.go
@@ -0,0 +1,364 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/rpcs"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func makePrivateGenerator(t *testing.T, round uint64, genesis bookkeeping.Genesis) *generator {
+ publicGenerator, err := MakeGenerator(round, genesis, GenerationConfig{
+ NumGenesisAccounts: 10,
+ GenesisAccountInitialBalance: 1000000000000,
+ PaymentTransactionFraction: 1.0,
+ PaymentNewAccountFraction: 1.0,
+ AssetCreateFraction: 1.0,
+ })
+ require.NoError(t, err)
+ return publicGenerator.(*generator)
+}
+
+func TestPaymentAcctCreate(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ g.generatePaymentTxnInternal(paymentAcctCreateTx, 0, 0)
+ require.Len(t, g.balances, int(g.config.NumGenesisAccounts+1))
+}
+
+func TestPaymentTransfer(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ g.generatePaymentTxnInternal(paymentTx, 0, 0)
+ require.Len(t, g.balances, int(g.config.NumGenesisAccounts))
+}
+
+func TestAssetXferNoAssetsOverride(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+
+ // First asset transaction must create.
+ actual, txn := g.generateAssetTxnInternal(assetXfer, 1, 0)
+ require.Equal(t, assetCreate, actual)
+ require.Equal(t, protocol.AssetConfigTx, txn.Type)
+ require.Len(t, g.assets, 0)
+ require.Len(t, g.pendingAssets, 1)
+ require.Len(t, g.pendingAssets[0].holdings, 1)
+ require.Len(t, g.pendingAssets[0].holders, 1)
+}
+
+func TestAssetXferOneHolderOverride(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ g.finishRound(0)
+ g.generateAssetTxnInternal(assetCreate, 1, 0)
+ g.finishRound(1)
+
+ // Transfer converted to optin if there is only 1 holder.
+ actual, txn := g.generateAssetTxnInternal(assetXfer, 2, 0)
+ require.Equal(t, assetOptin, actual)
+ require.Equal(t, protocol.AssetTransferTx, txn.Type)
+ require.Len(t, g.assets, 1)
+ // A new holding is created, indicating the optin
+ require.Len(t, g.assets[0].holdings, 2)
+ require.Len(t, g.assets[0].holders, 2)
+}
+
+func TestAssetCloseCreatorOverride(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ g.finishRound(0)
+ g.generateAssetTxnInternal(assetCreate, 1, 0)
+ g.finishRound(1)
+
+ // Instead of closing the creator, optin a new account
+ actual, txn := g.generateAssetTxnInternal(assetClose, 2, 0)
+ require.Equal(t, assetOptin, actual)
+ require.Equal(t, protocol.AssetTransferTx, txn.Type)
+ require.Len(t, g.assets, 1)
+ // A new holding is created, indicating the optin
+ require.Len(t, g.assets[0].holdings, 2)
+ require.Len(t, g.assets[0].holders, 2)
+}
+
+func TestAssetOptinEveryAccountOverride(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ g.finishRound(0)
+ g.generateAssetTxnInternal(assetCreate, 1, 0)
+ g.finishRound(1)
+
+ // Opt all the accounts in, this also verifies that no account is opted in twice
+ var txn transactions.Transaction
+ var actual TxTypeID
+ for i := 2; uint64(i) <= g.numAccounts; i++ {
+ actual, txn = g.generateAssetTxnInternal(assetOptin, 2, uint64(1+i))
+ require.Equal(t, assetOptin, actual)
+ require.Equal(t, protocol.AssetTransferTx, txn.Type)
+ require.Len(t, g.assets, 1)
+ require.Len(t, g.assets[0].holdings, i)
+ require.Len(t, g.assets[0].holders, i)
+ }
+ g.finishRound(2)
+
+ // All accounts have opted in
+ require.Equal(t, g.numAccounts, uint64(len(g.assets[0].holdings)))
+
+ // The next optin closes instead
+ actual, txn = g.generateAssetTxnInternal(assetOptin, 3, 0)
+ g.finishRound(3)
+ require.Equal(t, assetClose, actual)
+ require.Equal(t, protocol.AssetTransferTx, txn.Type)
+ require.Len(t, g.assets, 1)
+ require.Len(t, g.assets[0].holdings, int(g.numAccounts-1))
+ require.Len(t, g.assets[0].holders, int(g.numAccounts-1))
+}
+
+func TestAssetDestroyWithHoldingsOverride(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ g.finishRound(0)
+ g.generateAssetTxnInternal(assetCreate, 1, 0)
+ g.finishRound(1)
+ g.generateAssetTxnInternal(assetOptin, 2, 0)
+ g.finishRound(2)
+ g.generateAssetTxnInternal(assetXfer, 3, 0)
+ g.finishRound(3)
+ require.Len(t, g.assets[0].holdings, 2)
+ require.Len(t, g.assets[0].holders, 2)
+
+ actual, txn := g.generateAssetTxnInternal(assetDestroy, 4, 0)
+ require.Equal(t, assetClose, actual)
+ require.Equal(t, protocol.AssetTransferTx, txn.Type)
+ require.Len(t, g.assets, 1)
+ require.Len(t, g.assets[0].holdings, 1)
+ require.Len(t, g.assets[0].holders, 1)
+}
+
+func TestAssetTransfer(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ g.finishRound(0)
+
+ g.generateAssetTxnInternal(assetCreate, 1, 0)
+ g.finishRound(1)
+ g.generateAssetTxnInternal(assetOptin, 2, 0)
+ g.finishRound(2)
+ g.generateAssetTxnInternal(assetXfer, 3, 0)
+ g.finishRound(3)
+ require.Greater(t, g.assets[0].holdings[1].balance, uint64(0))
+}
+
+func TestAssetDestroy(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ g.finishRound(0)
+ g.generateAssetTxnInternal(assetCreate, 1, 0)
+ g.finishRound(1)
+ require.Len(t, g.assets, 1)
+
+ actual, txn := g.generateAssetTxnInternal(assetDestroy, 2, 0)
+ require.Equal(t, assetDestroy, actual)
+ require.Equal(t, protocol.AssetConfigTx, txn.Type)
+ require.Len(t, g.assets, 0)
+}
+
+func TestWriteRoundZero(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var testcases = []struct {
+ name string
+ dbround uint64
+ round uint64
+ genesis bookkeeping.Genesis
+ }{
+ {
+ name: "empty database",
+ dbround: 0,
+ round: 0,
+ genesis: bookkeeping.Genesis{},
+ },
+ {
+ name: "preloaded database",
+ dbround: 1,
+ round: 1,
+ genesis: bookkeeping.Genesis{Network: "TestWriteRoundZero"},
+ },
+ }
+ for _, tc := range testcases {
+ tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ g := makePrivateGenerator(t, tc.dbround, tc.genesis)
+ var data []byte
+ writer := bytes.NewBuffer(data)
+ g.WriteBlock(writer, tc.round)
+ var block rpcs.EncodedBlockCert
+ protocol.Decode(data, &block)
+ require.Len(t, block.Block.Payset, 0)
+ g.ledger.Close()
+ })
+ }
+
+}
+
+func TestWriteRound(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ var data []byte
+ writer := bytes.NewBuffer(data)
+ g.WriteBlock(writer, 0)
+ g.WriteBlock(writer, 1)
+ var block rpcs.EncodedBlockCert
+ protocol.Decode(data, &block)
+ require.Len(t, block.Block.Payset, int(g.config.TxnPerBlock))
+ require.NotNil(t, g.ledger)
+ require.Equal(t, basics.Round(1), g.ledger.Latest())
+ _, err := g.ledger.GetStateDeltaForRound(1)
+ require.NoError(t, err)
+ // request a block that is several rounds ahead of the current round
+ err = g.WriteBlock(writer, 10)
+ require.NotNil(t, err)
+ require.Equal(t, err.Error(), "generator only supports sequential block access. Expected 2 but received request for 10")
+}
+
+func TestWriteRoundWithPreloadedDB(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ var testcases = []struct {
+ name string
+ dbround uint64
+ round uint64
+ genesis bookkeeping.Genesis
+ err error
+ }{
+ {
+ name: "preloaded database starting at round 1",
+ dbround: 1,
+ round: 1,
+ genesis: bookkeeping.Genesis{Network: "generator-test1"},
+ err: nil,
+ },
+ {
+ name: "invalid request",
+ dbround: 10,
+ round: 1,
+ genesis: bookkeeping.Genesis{Network: "generator-test2"},
+ err: fmt.Errorf("cannot generate block for round 1, already in database"),
+ },
+ {
+ name: "invalid request 2",
+ dbround: 1,
+ round: 10,
+ genesis: bookkeeping.Genesis{Network: "generator-test3"},
+ err: fmt.Errorf("generator only supports sequential block access. Expected 2 but received request for 10"),
+ },
+ {
+ name: "preloaded database starting at 10",
+ dbround: 10,
+ round: 11,
+ genesis: bookkeeping.Genesis{Network: "generator-test4"},
+ err: nil,
+ },
+ {
+ name: "preloaded database request round 20",
+ dbround: 10,
+ round: 20,
+ genesis: bookkeeping.Genesis{Network: "generator-test5"},
+ err: nil,
+ },
+ }
+ for _, tc := range testcases {
+ tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ g := makePrivateGenerator(t, tc.dbround, tc.genesis)
+ defer g.ledger.Close()
+ var data []byte
+ writer := bytes.NewBuffer(data)
+ err := g.WriteBlock(writer, tc.dbround)
+ require.Nil(t, err)
+ // invalid block request
+ if tc.round != tc.dbround && tc.err != nil {
+ err = g.WriteBlock(writer, tc.round)
+ require.NotNil(t, err)
+ require.Equal(t, err.Error(), tc.err.Error())
+ return
+ }
+ // write the rest of the blocks
+ for i := tc.dbround + 1; i <= tc.round; i++ {
+ err = g.WriteBlock(writer, i)
+ require.Nil(t, err)
+ }
+ var block rpcs.EncodedBlockCert
+ protocol.Decode(data, &block)
+ require.Len(t, block.Block.Payset, int(g.config.TxnPerBlock))
+ require.NotNil(t, g.ledger)
+ require.Equal(t, basics.Round(tc.round-tc.dbround), g.ledger.Latest())
+ if tc.round > tc.dbround {
+ _, err = g.ledger.GetStateDeltaForRound(basics.Round(tc.round - tc.dbround))
+ require.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestHandlers(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ g := makePrivateGenerator(t, 0, bookkeeping.Genesis{})
+ handler := getBlockHandler(g)
+ var testcases = []struct {
+ name string
+ url string
+ err string
+ }{
+ {
+ name: "no block",
+ url: "/v2/blocks/?nothing",
+ err: "invalid request path, /",
+ },
+ {
+ name: "blocks: round must be numeric",
+ url: "/v2/blocks/round",
+ err: `strconv.ParseUint: parsing "round": invalid syntax`,
+ },
+ {
+ name: "deltas: round must be numeric",
+ url: "/v2/deltas/round",
+ err: `strconv.ParseUint: parsing "round": invalid syntax`,
+ },
+ }
+
+ for _, testcase := range testcases {
+ t.Run(testcase.name, func(t *testing.T) {
+ req := httptest.NewRequest("GET", testcase.url, nil)
+ w := httptest.NewRecorder()
+ handler(w, req)
+ require.Equal(t, http.StatusBadRequest, w.Code)
+ require.Contains(t, w.Body.String(), testcase.err)
+ })
+ }
+}
diff --git a/tools/block-generator/generator/make_transactions.go b/tools/block-generator/generator/make_transactions.go
new file mode 100644
index 000000000..cd316779e
--- /dev/null
+++ b/tools/block-generator/generator/make_transactions.go
@@ -0,0 +1,97 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "encoding/binary"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+func (g *generator) makeTxnHeader(sender basics.Address, round, intra uint64) transactions.Header {
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, uint64(g.txnCounter+intra))
+
+ return transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: g.params.MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisID: g.genesisID,
+ GenesisHash: g.genesisHash,
+ Note: note,
+ }
+}
+
+func (g *generator) makePaymentTxn(header transactions.Header, receiver basics.Address, amount uint64, closeRemainderTo basics.Address) transactions.Transaction {
+ return transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: header,
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: receiver,
+ Amount: basics.MicroAlgos{Raw: amount},
+ CloseRemainderTo: closeRemainderTo,
+ },
+ }
+}
+
+func (g *generator) makeAssetCreateTxn(header transactions.Header, total uint64, defaultFrozen bool, assetName string) transactions.Transaction {
+ return transactions.Transaction{
+ Type: protocol.AssetConfigTx,
+ Header: header,
+ AssetConfigTxnFields: transactions.AssetConfigTxnFields{
+ AssetParams: basics.AssetParams{
+ Total: total,
+ DefaultFrozen: defaultFrozen,
+ AssetName: assetName,
+ Manager: header.Sender,
+ Freeze: header.Sender,
+ Clawback: header.Sender,
+ Reserve: header.Sender,
+ },
+ },
+ }
+}
+
+func (g *generator) makeAssetDestroyTxn(header transactions.Header, index uint64) transactions.Transaction {
+ return transactions.Transaction{
+ Type: protocol.AssetConfigTx,
+ Header: header,
+ AssetConfigTxnFields: transactions.AssetConfigTxnFields{
+ ConfigAsset: basics.AssetIndex(index),
+ },
+ }
+}
+
+func (g *generator) makeAssetTransferTxn(header transactions.Header, receiver basics.Address, amount uint64, closeAssetsTo basics.Address, index uint64) transactions.Transaction {
+ return transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: header,
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: basics.AssetIndex(index),
+ AssetAmount: amount,
+ AssetReceiver: receiver,
+ AssetCloseTo: closeAssetsTo,
+ },
+ }
+}
+
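+// makeAssetAcceptanceTxn builds an asset opt-in, which on Algorand is simply
+// a zero-amount asset transfer from the sender to itself.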
+func (g *generator) makeAssetAcceptanceTxn(header transactions.Header, index uint64) transactions.Transaction {
+ return g.makeAssetTransferTxn(header, header.Sender, 0, basics.Address{}, index)
+}
diff --git a/tools/block-generator/generator/server.go b/tools/block-generator/generator/server.go
new file mode 100644
index 000000000..81a7546b1
--- /dev/null
+++ b/tools/block-generator/generator/server.go
@@ -0,0 +1,169 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/tools/block-generator/util"
+ "gopkg.in/yaml.v3"
+)
+
+func initializeConfigFile(configFile string) (config GenerationConfig, err error) {
+ data, err := os.ReadFile(configFile)
+ if err != nil {
+ return
+ }
+ err = yaml.Unmarshal(data, &config)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MakeServer configures the HTTP handlers and returns the server together with its Generator.
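+// A minimal usage sketch (the config path and address are illustrative):
+//
+//	srv, _ := MakeServer("test_config.yml", ":8080")
+//	go func() { _ = srv.ListenAndServe() }()
+//	defer srv.Shutdown(context.Background())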
+func MakeServer(configFile string, addr string) (*http.Server, Generator) {
+ noOp := func(next http.Handler) http.Handler {
+ return next
+ }
+ return MakeServerWithMiddleware(0, "", configFile, addr, noOp)
+}
+
+// BlocksMiddleware is a middleware for the blocks endpoint.
+type BlocksMiddleware func(next http.Handler) http.Handler
+
+// MakeServerWithMiddleware allows injecting a middleware for the blocks handler.
+// This lets tests pause block production while the emitted data is being
+// validated.
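+//
+// For example, a test could pause block production with a lock-based
+// middleware (a sketch; the names here are illustrative, not part of the API):
+//
+//	var mu sync.Mutex
+//	pause := func(next http.Handler) http.Handler {
+//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//			mu.Lock()
+//			defer mu.Unlock()
+//			next.ServeHTTP(w, r)
+//		})
+//	}
+//	srv, _ := MakeServerWithMiddleware(0, "", "test_config.yml", ":8080", pause)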
+func MakeServerWithMiddleware(dbround uint64, genesisFile string, configFile string, addr string, blocksMiddleware BlocksMiddleware) (*http.Server, Generator) {
+ config, err := initializeConfigFile(configFile)
+ util.MaybeFail(err, "problem loading config file. Use '--config' or create a config file.")
+ var bkGenesis bookkeeping.Genesis
+ if genesisFile != "" {
+ bkGenesis, err = bookkeeping.LoadGenesisFromFile(genesisFile)
+ util.MaybeFail(err, "Failed to parse genesis file '%s'", genesisFile)
+ }
+ gen, err := MakeGenerator(dbround, bkGenesis, config)
+ util.MaybeFail(err, "Failed to make generator with config file '%s'", configFile)
+
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", help)
+ mux.Handle("/v2/blocks/", blocksMiddleware(http.HandlerFunc(getBlockHandler(gen))))
+ mux.HandleFunc("/v2/accounts/", getAccountHandler(gen))
+ mux.HandleFunc("/genesis", getGenesisHandler(gen))
+ mux.HandleFunc("/report", getReportHandler(gen))
+ mux.HandleFunc("/v2/status/wait-for-block-after/", getStatusWaitHandler(gen))
+ mux.HandleFunc("/v2/ledger/sync/", func(w http.ResponseWriter, r *http.Request) {})
+ mux.HandleFunc("/v2/deltas/", getDeltasHandler(gen))
+
+ return &http.Server{
+ Addr: addr,
+ Handler: mux,
+ ReadHeaderTimeout: 3 * time.Second,
+ }, gen
+}
+
+func help(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, "Use /v2/blocks/:blocknum: to get a block.")
+}
+
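+// maybeWriteError reports a non-nil err as an HTTP 500; a nil err writes
+// nothing, leaving the handler's normal response untouched.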
+func maybeWriteError(w http.ResponseWriter, err error) {
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+}
+
+func getReportHandler(gen Generator) func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ maybeWriteError(w, gen.WriteReport(w))
+ }
+}
+
+func getStatusWaitHandler(gen Generator) func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ maybeWriteError(w, gen.WriteStatus(w))
+ }
+}
+
+func getGenesisHandler(gen Generator) func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ maybeWriteError(w, gen.WriteGenesis(w))
+ }
+}
+
+func getBlockHandler(gen Generator) func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ // Parse the requested round out of the path; the rest of the URL is ignored.
+ s, err := parseURL(r.URL.Path)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ round, err := strconv.ParseUint(s, 0, 64)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ maybeWriteError(w, gen.WriteBlock(w, round))
+ }
+}
+
+func getAccountHandler(gen Generator) func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ // Parse the requested account address out of the path; the rest of the URL is ignored.
+ account, err := parseURL(r.URL.Path)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ maybeWriteError(w, gen.WriteAccount(w, account))
+ }
+}
+
+func getDeltasHandler(gen Generator) func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ s, err := parseURL(r.URL.Path)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ round, err := strconv.ParseUint(s, 0, 64)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ maybeWriteError(w, gen.WriteDeltas(w, round))
+ }
+}
+
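+// parseURL returns the final path segment with any query string stripped,
+// e.g. "/v2/blocks/1234?pretty" yields "1234". A path ending in "/" is an
+// error.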
+func parseURL(path string) (string, error) {
+ i := strings.LastIndex(path, "/")
+ if i == len(path)-1 {
+ return "", fmt.Errorf("invalid request path, %s", path)
+ }
+ if strings.Contains(path[i+1:], "?") {
+ return strings.Split(path[i+1:], "?")[0], nil
+ }
+ return path[i+1:], nil
+}
diff --git a/tools/block-generator/generator/server_test.go b/tools/block-generator/generator/server_test.go
new file mode 100644
index 000000000..7007db00f
--- /dev/null
+++ b/tools/block-generator/generator/server_test.go
@@ -0,0 +1,129 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestInitConfigFile(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ config, err := initializeConfigFile("../test_config.yml")
+ require.NoError(t, err)
+ require.Equal(t, uint64(10), config.NumGenesisAccounts)
+ require.Equal(t, float32(0.25), config.AssetCloseFraction)
+ require.Equal(t, float32(0.0), config.AssetDestroyFraction)
+}
+
+func TestInitConfigFileNotExist(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ _, err := initializeConfigFile("this_is_not_a_config_file")
+
+ if _, ok := err.(*os.PathError); !ok {
+ require.Fail(t, "This should generate a path error")
+ }
+}
+
+func TestParseURL(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ const blockQueryPrefix = "http://v2/blocks/"
+ const accountQueryPrefix = "http://v2/accounts/"
+ const deltaQueryPrefix = "http://v2/deltas/"
+ var testcases = []struct {
+ name string
+ url string
+ expectedParam string
+ err string
+ }{
+ {
+ name: "no block",
+ url: "/v2/blocks/",
+ expectedParam: "",
+ err: "invalid request path, /v2/blocks/",
+ },
+ {
+ name: "normal one digit",
+ url: fmt.Sprintf("%s1", blockQueryPrefix),
+ expectedParam: "1",
+ err: "",
+ },
+ {
+ name: "normal long number",
+ url: fmt.Sprintf("%s12345678", blockQueryPrefix),
+ expectedParam: "12345678",
+ err: "",
+ },
+ {
+ name: "with query parameters",
+ url: fmt.Sprintf("%s1234?pretty", blockQueryPrefix),
+ expectedParam: "1234",
+ err: "",
+ },
+ {
+ name: "no deltas",
+ url: "/v2/deltas/",
+ expectedParam: "",
+ err: "invalid request path, /v2/deltas/",
+ },
+ {
+ name: "deltas",
+ url: fmt.Sprintf("%s123?Format=msgp", deltaQueryPrefix),
+ expectedParam: "123",
+ err: "",
+ },
+ {
+ name: "no account",
+ url: "/v2/accounts/",
+ expectedParam: "",
+ err: "invalid request path, /v2/accounts/",
+ },
+ {
+ name: "accounts",
+ url: fmt.Sprintf("%sAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGFFWAF4", accountQueryPrefix),
+ expectedParam: "AIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGFFWAF4",
+ err: "",
+ },
+ }
+
+ for _, testcase := range testcases {
+ t.Run(testcase.name, func(t *testing.T) {
+ round, err := parseURL(testcase.url)
+ if len(testcase.err) == 0 {
+ msg := fmt.Sprintf("Unexpected error parsing '%s', expected round '%s' received error: %v",
+ testcase.url, testcase.expectedParam, err)
+ require.NoError(t, err, msg)
+ assert.Equal(t, testcase.expectedParam, round)
+ } else {
+ require.ErrorContains(t, err, testcase.err)
+ }
+ })
+ }
+}
diff --git a/tools/block-generator/generator/utils.go b/tools/block-generator/generator/utils.go
new file mode 100644
index 000000000..5f048a025
--- /dev/null
+++ b/tools/block-generator/generator/utils.go
@@ -0,0 +1,81 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math/rand"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-codec/codec"
+)
+
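+// weightedSelection randomly picks one of options according to weights by
+// walking the cumulative distribution; if the draw lands beyond the total
+// weight (e.g. weights that sum to less than 1), defaultOption is returned.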
+func weightedSelection(weights []float32, options []interface{}, defaultOption interface{}) (selection interface{}, err error) {
+ return weightedSelectionInternal(rand.Float32(), weights, options, defaultOption)
+}
+
+func weightedSelectionInternal(selectionNumber float32, weights []float32, options []interface{}, defaultOption interface{}) (selection interface{}, err error) {
+ if len(weights) != len(options) {
+ err = fmt.Errorf("number of weights must equal number of options: %d != %d", len(weights), len(options))
+ return
+ }
+
+ total := float32(0)
+ for i := 0; i < len(weights); i++ {
+ if selectionNumber-total < weights[i] {
+ selection = options[i]
+ return
+ }
+ total += weights[i]
+ }
+
+ selection = defaultOption
+ return
+}
+
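+// indexToAccount deterministically maps an index to an address by writing
+// index+1 into the first 8 bytes (avoiding the zero address); accountToIndex
+// inverts the mapping.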
+func indexToAccount(i uint64) (addr basics.Address) {
+ // Make sure we don't generate a zero address by adding 1 to i
+ binary.LittleEndian.PutUint64(addr[:], i+1)
+ return
+}
+
+func accountToIndex(a basics.Address) uint64 {
+ // Reverse of indexToAccount: subtract the 1 that was added to avoid the zero address.
+ return binary.LittleEndian.Uint64(a[:]) - 1
+}
+
+func convertToGenesisBalances(balances []uint64) map[basics.Address]basics.AccountData {
+ genesisBalances := make(map[basics.Address]basics.AccountData)
+ for i, balance := range balances {
+ genesisBalances[indexToAccount(uint64(i))] = basics.AccountData{
+ MicroAlgos: basics.MicroAlgos{Raw: balance},
+ }
+ }
+ return genesisBalances
+}
+
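+// encode serializes obj with the given codec handle, wrapping any encoder
+// error with context.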
+func encode(handle codec.Handle, obj interface{}) ([]byte, error) {
+ var output []byte
+ enc := codec.NewEncoderBytes(&output, handle)
+
+ err := enc.Encode(obj)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode object: %w", err)
+ }
+ return output, nil
+}
diff --git a/tools/block-generator/generator/utils_test.go b/tools/block-generator/generator/utils_test.go
new file mode 100644
index 000000000..fb69a43fe
--- /dev/null
+++ b/tools/block-generator/generator/utils_test.go
@@ -0,0 +1,132 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package generator
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestWeightedSelectionInternalBadInput(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ weights := []float32{0.10, 0.30}
+ options := []interface{}{"10"}
+ _, err := weightedSelectionInternal(0, weights, options, nil)
+ require.EqualError(t, err, "number of weights must equal number of options: 2 != 1")
+}
+
+func TestWeightedSelectionInternal(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ weights := []float32{0.10, 0.30, 0.60}
+ options := []interface{}{"10", "30", "60"}
+
+ testcases := []struct {
+ selectionNum float32
+ expected interface{}
+ }{
+ {
+ selectionNum: 0.0,
+ expected: options[0],
+ },
+ {
+ selectionNum: 0.099,
+ expected: options[0],
+ },
+ {
+ selectionNum: 0.1,
+ expected: options[1],
+ },
+ {
+ selectionNum: 0.399,
+ expected: options[1],
+ },
+ {
+ selectionNum: 0.4,
+ expected: options[2],
+ },
+ {
+ selectionNum: 0.999,
+ expected: options[2],
+ },
+ }
+
+ for _, test := range testcases {
+ name := fmt.Sprintf("selectionNum %f - expected %v", test.selectionNum, test.expected)
+ t.Run(name, func(t *testing.T) {
+ actual, err := weightedSelectionInternal(test.selectionNum, weights, options, nil)
+ require.NoError(t, err)
+ require.Equal(t, test.expected, actual)
+ })
+ }
+}
+
+func TestWeightedSelection(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ weights := []float32{0.10, 0.30, 0.60}
+ options := []interface{}{"10", "30", "60"}
+ selections := make(map[interface{}]int)
+
+ for i := 0; i < 100; i++ {
+ selected, err := weightedSelection(weights, options, nil)
+ require.NoError(t, err)
+ selections[selected]++
+ }
+
+ assert.Less(t, selections[options[0]], selections[options[1]])
+ assert.Less(t, selections[options[1]], selections[options[2]])
+}
+
+func TestWeightedSelectionOutOfRange(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ weights := []float32{0.1}
+ options := []interface{}{"1"}
+ defaultOption := "DEFAULT!"
+
+ for i := 0; i < 10000; i++ {
+ selection, err := weightedSelection(weights, options, defaultOption)
+ require.NoError(t, err)
+ if selection == defaultOption {
+ return
+ }
+ }
+ assert.Fail(t, "Expected the default option to be selected at least once by this point.")
+}
+
+func TestConvertToGenesisBalance(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ balance := []uint64{100, 200, 300}
+ genesisBalances := convertToGenesisBalances(balance)
+ require.Equal(t, 3, len(genesisBalances))
+ for i, bal := range balance {
+ require.Equal(t, bal, genesisBalances[indexToAccount(uint64(i))].MicroAlgos.Raw)
+ }
+}
+
+func TestIndexToAccountAndAccountToIndex(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ // indexToAccount and accountToIndex must be exact inverses over a wide range.
+ for i := uint64(0); i < uint64(100000); i++ {
+ acct := indexToAccount(i)
+ result := accountToIndex(acct)
+ require.Equal(t, i, result)
+ }
+}
diff --git a/tools/block-generator/go.mod b/tools/block-generator/go.mod
new file mode 100644
index 000000000..306627f71
--- /dev/null
+++ b/tools/block-generator/go.mod
@@ -0,0 +1,50 @@
+module github.com/algorand/go-algorand/tools/block-generator
+
+replace github.com/algorand/go-algorand => ../..
+
+go 1.17
+
+require (
+ github.com/algorand/go-algorand v0.0.0-00010101000000-000000000000
+ github.com/algorand/go-codec/codec v1.1.9
+ github.com/algorand/go-deadlock v0.2.2
+ github.com/lib/pq v1.10.9
+ github.com/spf13/cobra v1.7.0
+ github.com/stretchr/testify v1.8.3
+ gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
+ github.com/DataDog/zstd v1.5.2 // indirect
+ github.com/algorand/avm-abi v0.2.0 // indirect
+ github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414 // indirect
+ github.com/algorand/go-sumhash v0.1.0 // indirect
+ github.com/algorand/msgp v1.1.53 // indirect
+ github.com/algorand/oapi-codegen v1.12.0-algorand.0 // indirect
+ github.com/algorand/websocket v1.4.6 // indirect
+ github.com/aws/aws-sdk-go v1.33.0 // indirect
+ github.com/consensys/gnark-crypto v0.7.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 // indirect
+ github.com/dchest/siphash v1.2.1 // indirect
+ github.com/golang/snappy v0.0.4 // indirect
+ github.com/google/uuid v1.3.0 // indirect
+ github.com/gorilla/mux v1.8.0 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jmespath/go-jmespath v0.3.0 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mattn/go-sqlite3 v1.10.0 // indirect
+ github.com/miekg/dns v1.1.41 // indirect
+ github.com/mmcloughlin/addchain v0.4.0 // indirect
+ github.com/olivere/elastic v6.2.14+incompatible // indirect
+ github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/sirupsen/logrus v1.8.1 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ golang.org/x/crypto v0.1.0 // indirect
+ golang.org/x/net v0.9.0 // indirect
+ golang.org/x/sys v0.7.0 // indirect
+ gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009 // indirect
+)
diff --git a/tools/block-generator/go.sum b/tools/block-generator/go.sum
new file mode 100644
index 000000000..bd2732c8f
--- /dev/null
+++ b/tools/block-generator/go.sum
@@ -0,0 +1,948 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
+github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/algorand/avm-abi v0.2.0 h1:bkjsG+BOEcxUcnGSALLosmltE0JZdg+ZisXKx0UDX2k=
+github.com/algorand/avm-abi v0.2.0/go.mod h1:+CgwM46dithy850bpTeHh9MC99zpn2Snirb3QTl2O/g=
+github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414 h1:nwYN+GQ7Z5OOfZwqBO1ma7DSlP7S1YrKWICOyjkwqrc=
+github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
+github.com/algorand/go-codec/codec v1.1.9 h1:el4HFSPZhP+YCgOZxeFGB/BqlNkaUIs55xcALulUTCM=
+github.com/algorand/go-codec/codec v1.1.9/go.mod h1:YkEx5nmr/zuCeaDYOIhlDg92Lxju8tj2d2NrYqP7g7k=
+github.com/algorand/go-deadlock v0.2.2 h1:L7AKATSUCzoeVuOgpTipfCEjdUu5ECmlje8R7lP9DOY=
+github.com/algorand/go-deadlock v0.2.2/go.mod h1:Hat1OXKqKNUcN/iv74FjGhF4hsOE2l7gOgQ9ZVIq6Fk=
+github.com/algorand/go-sumhash v0.1.0 h1:b/QRhyLuF//vOcicBIxBXYW8bERNoeLxieht/dUYpVg=
+github.com/algorand/go-sumhash v0.1.0/go.mod h1:OOe7jdDWUhLkuP1XytkK5gnLu9entAviN5DfDZh6XAc=
+github.com/algorand/graphtrace v0.1.0/go.mod h1:HscLQrzBdH1BH+5oehs3ICd8SYcXvnSL9BjfTu8WHCc=
+github.com/algorand/msgp v1.1.53 h1:D6HKLyvLE6ltfsf8Apsrc+kqYb/CcOZEAfh1DpkPrNg=
+github.com/algorand/msgp v1.1.53/go.mod h1:5K3d58/poT5fPmtiwuQft6GjgSrVEM46KoXdLrID8ZU=
+github.com/algorand/oapi-codegen v1.12.0-algorand.0 h1:W9PvED+wAJc+9EeXPONnA+0zE9UhynEqoDs4OgAxKhk=
+github.com/algorand/oapi-codegen v1.12.0-algorand.0/go.mod h1:tIWJ9K/qrLDVDt5A1p82UmxZIEGxv2X+uoujdhEAL48=
+github.com/algorand/websocket v1.4.6 h1:I0kV4EYwatuUrKtNiwzYYgojgwh6pksDmlqntKG2Woc=
+github.com/algorand/websocket v1.4.6/go.mod h1:HJmdGzFtnlUQ4nTzZP6WrT29oGYf1t6Ybi64vROcT+M=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aws/aws-sdk-go v1.33.0 h1:Bq5Y6VTLbfnJp1IV8EL/qUU5qO1DYHda/zis/sqevkY=
+github.com/aws/aws-sdk-go v1.33.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas=
+github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/consensys/bavard v0.1.10/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
+github.com/consensys/gnark-crypto v0.7.0 h1:rwdy8+ssmLYRqKp+ryRRgQJl/rCq2uv+n83cOydm5UE=
+github.com/consensys/gnark-crypto v0.7.0/go.mod h1:KPSuJzyxkJA8xZ/+CV47tyqkr9MmpZA3PXivK4VPrVg=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/daixiang0/gci v0.3.2/go.mod h1:jaASoJmv/ykO9dAAPy31iJnreV19248qKDdVWf3QgC4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC5IlbaIF5Q7JNieBoACT7iW0YTxQHR0in0=
+github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
+github.com/dchest/siphash v1.2.1 h1:4cLinnzVJDKxTCl9B01807Yiy+W7ZzVHj/KIroQRvT4=
+github.com/dchest/siphash v1.2.1/go.mod h1:q+IRvb2gOSrUnYoPqHiyHXS0FOBBOdl6tONBlVnOnt4=
+github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/getkin/kin-openapi v0.107.0/go.mod h1:9Dhr+FasATJZjS4iOLvB0hkaxgYdulrNYm2e9epLWOo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
+github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
+github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
+github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/invopop/yaml v0.1.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
+github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
+github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/labstack/echo/v4 v4.9.1/go.mod h1:Pop5HLc+xoc4qhTZ1ip6C0RtP7Z+4VzRLWZZFKqbbjo=
+github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
+github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
+github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
+github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
+github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
+github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
+github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc=
+github.com/lestrrat-go/jwx v1.2.25/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY=
+github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
+github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
+github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
+github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/olivere/elastic v6.2.14+incompatible h1:k+KadwNP/dkXE0/eu+T6otk1+5fe0tEpPyQJ4XVm5i8=
+github.com/olivere/elastic v6.2.14+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
+github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
+github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
+github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
+github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009 h1:q/fZgS8MMadqFFGa8WL4Oyz+TmjiZfi8UrzWhTl8d5w=
+gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009/go.mod h1:O0bY1e/dSoxMYZYTHP0SWKxG5EWLEvKR9/cOjWPPMKU=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
diff --git a/tools/block-generator/main.go b/tools/block-generator/main.go
new file mode 100644
index 000000000..6525bc4a6
--- /dev/null
+++ b/tools/block-generator/main.go
@@ -0,0 +1,26 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import "github.com/algorand/go-algorand/tools/block-generator/core"
+
+func main() {
+ err := core.BlockGenerator.Execute()
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/tools/block-generator/requirements.txt b/tools/block-generator/requirements.txt
new file mode 100644
index 000000000..829c2ea81
--- /dev/null
+++ b/tools/block-generator/requirements.txt
@@ -0,0 +1 @@
+datadog==0.45.0
\ No newline at end of file
diff --git a/tools/block-generator/run_postgres.sh b/tools/block-generator/run_postgres.sh
new file mode 100755
index 000000000..2c8175bb9
--- /dev/null
+++ b/tools/block-generator/run_postgres.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+# This script is useful if you want to launch the runner
+# in a debugger: start this script, then run the runner with:
+# ./block-generator runner \
+# -d 5s \
+# -i <path to conduit binary> \
+# -c "host=localhost user=algorand password=algorand dbname=generator_db port=15432 sslmode=disable" \
+# -r results \
+# -s scenarios/config.payment.small.yml
+
+set -e
+
+POSTGRES_CONTAINER=generator-test-container
+POSTGRES_PORT=15432
+POSTGRES_DATABASE=generator_db
+CONFIG=${1:-"$(dirname "$0")/test_config.yml"}
+echo "Using config file: $CONFIG"
+
+function start_postgres() {
+ docker rm -f $POSTGRES_CONTAINER > /dev/null 2>&1 || true
+
+ # Start postgres container...
+ docker run \
+ -d \
+ --name $POSTGRES_CONTAINER \
+ -e POSTGRES_USER=algorand \
+ -e POSTGRES_PASSWORD=algorand \
+ -e PGPASSWORD=algorand \
+ -p $POSTGRES_PORT:5432 \
+ postgres:13-alpine
+
+ sleep 5
+
+ docker exec -it $POSTGRES_CONTAINER psql -Ualgorand -c "create database $POSTGRES_DATABASE"
+}
+
+function shutdown() {
+ docker rm -f $POSTGRES_CONTAINER > /dev/null 2>&1 || true
+}
+
+trap shutdown EXIT
+
+pushd $(dirname "$0") > /dev/null
+echo "Starting postgres container at: \n\t\"host=localhost user=algorand password=algorand dbname=generator_db port=15432\""
+start_postgres
+echo "Sleeping, use Ctrl-C to end test."
+sleep 100000000
+
diff --git a/tools/block-generator/run_runner.sh b/tools/block-generator/run_runner.sh
new file mode 100755
index 000000000..5e8396631
--- /dev/null
+++ b/tools/block-generator/run_runner.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+
+# Demonstrate how to run the block-generator runner.
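+# Usage: ./run_runner.sh <path-to-conduit-binary> [scenario-file]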
+
+set -e
+
+OUTPUT=../../tmp/OUTPUT_RUN_RUNNER_TEST
+
+CONDUIT_BINARY=$1
+if [ -z "$CONDUIT_BINARY" ]; then
+ echo "path to conduit binary is required"
+ exit 1
+fi
+
+POSTGRES_CONTAINER=generator-test-container
+POSTGRES_PORT=15432
+POSTGRES_DATABASE=generator_db
+SCENARIO=${2:-"$(dirname "$0")/test_config.yml"}
+echo "Using scenario config file: $SCENARIO"
+
+function start_postgres() {
+ docker rm -f $POSTGRES_CONTAINER > /dev/null 2>&1 || true
+
+ # Start postgres container...
+ docker run \
+ -d \
+ --name $POSTGRES_CONTAINER \
+ -e POSTGRES_USER=algorand \
+ -e POSTGRES_PASSWORD=algorand \
+ -e PGPASSWORD=algorand \
+ -p $POSTGRES_PORT:5432 \
+ postgres:13-alpine
+
+ sleep 5
+
+ docker exec -it $POSTGRES_CONTAINER psql -Ualgorand -c "create database $POSTGRES_DATABASE"
+}
+
+function shutdown() {
+ docker rm -f $POSTGRES_CONTAINER > /dev/null 2>&1 || true
+}
+
+trap shutdown EXIT
+
+rm -rf $OUTPUT > /dev/null 2>&1
+echo "Building generator."
+pushd $(dirname "$0") > /dev/null
+go build
+popd
+echo "Starting postgres container."
+start_postgres
+echo "Starting test runner"
+$(dirname "$0")/block-generator runner \
+ --conduit-binary "$CONDUIT_BINARY" \
+ --report-directory $OUTPUT \
+ --test-duration 30s \
+ --log-level trace \
+ --postgres-connection-string "host=localhost user=algorand password=algorand dbname=generator_db port=15432 sslmode=disable" \
+ --scenario ${SCENARIO} \
+ --reset-db
diff --git a/tools/block-generator/run_tests.sh b/tools/block-generator/run_tests.sh
new file mode 100755
index 000000000..fcfc7279e
--- /dev/null
+++ b/tools/block-generator/run_tests.sh
@@ -0,0 +1,97 @@
+#!/usr/bin/env bash
+
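+# Run the block-generator test suite against a conduit binary and collect reports.
+# Example invocation (paths are illustrative):
+#   ./run_tests.sh -g ./block-generator -i ./conduit \
+#     -c "host=localhost user=algorand password=algorand dbname=generator_db port=15432 sslmode=disable" \
+#     -s scenarios -r results -d 5m
+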
+CONNECTION_STRING=""
+CONDUIT_BINARY=""
+REPORT_DIR=""
+DURATION="1h"
+LOG_LEVEL="error"
+SCENARIOS=""
+
+help() {
+ echo "Usage:"
+ echo " -v|--verbose enable verbose script output."
+ echo " -c|--connection-string"
+ echo " PostgreSQL connection string."
+ echo " -i|--conduit path to conduit binary."
+ echo " -s|--scenarios path to conduit test scenarios."
+ echo " -r|--report-dir directory where the report should be written."
+ echo " -d|--duration test duration."
+ echo " -l|--level log level to pass to conduit."
+ echo " -g|--generator block-generator binary to run the generator."
+ exit
+}
+
+while :; do
+ case "${1-}" in
+ -h | --help) help ;;
+ -v | --verbose) set -x ;;
+ -c | --connection-string)
+ CONNECTION_STRING="${2-}"
+ shift
+ ;;
+ -g | --generator)
+ GENERATOR_BINARY="${2-}"
+ shift
+ ;;
+ -i | --conduit)
+ CONDUIT_BINARY="${2-}"
+ shift
+ ;;
+ -r | --report-dir)
+ REPORT_DIR="${2-}"
+ shift
+ ;;
+ -s | --scenarios)
+ SCENARIOS="${2-}"
+ shift
+ ;;
+ -d | --duration)
+ DURATION="${2-}"
+ shift
+ ;;
+ -l | --level)
+ LOG_LEVEL="${2-}"
+ shift
+ ;;
+ -?*) echo "Unknown option: $1" && exit 1;;
+ *) break ;;
+ esac
+ shift
+done
+
+args=("$@")
+
+if [ -z "$CONNECTION_STRING" ]; then
+ echo "Missing required connection string parameter (-c / --connection-string)."
+ exit 1
+fi
+
+if [ -z "$CONDUIT_BINARY" ]; then
+ echo "Missing required conduit binary parameter (-i / --conduit)."
+ exit 1
+fi
+
+if [ -z "$SCENARIOS" ]; then
+ echo "Missing required conduit test scenario parameter (-s / --scenarios)."
+ exit 1
+fi
+
+if [ -z "$GENERATOR_BINARY" ]; then
+  echo "Missing required block-generator binary parameter (-g / --generator)."
+ exit 1
+fi
+
+echo "Running with binary: $CONDUIT_BINARY"
+echo "Report directory: $REPORT_DIR"
+echo "Duration: $DURATION"
+echo "Log Level: $LOG_LEVEL"
+
+"$GENERATOR_BINARY" runner \
+ -i "$CONDUIT_BINARY" \
+ -s "$SCENARIOS" \
+ -d "$DURATION" \
+ -c "$CONNECTION_STRING" \
+ --report-directory "$REPORT_DIR" \
+ --log-level "$LOG_LEVEL" \
+ --reset-report-dir
+
diff --git a/tools/block-generator/runner/metrics_collector.go b/tools/block-generator/runner/metrics_collector.go
new file mode 100644
index 000000000..394bf32cb
--- /dev/null
+++ b/tools/block-generator/runner/metrics_collector.go
@@ -0,0 +1,113 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package runner
+
+import (
+ "bufio"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+)
+
+// Prometheus metrics collected in Conduit.
+const (
+ BlockImportTimeName = "import_time_sec"
+ ImportedTxnsPerBlockName = "imported_tx_per_block"
+ ImportedRoundGaugeName = "imported_round"
+ GetAlgodRawBlockTimeName = "get_algod_raw_block_time_sec"
+ ImportedTxnsName = "imported_txns"
+ ImporterTimeName = "importer_time_sec"
+ ProcessorTimeName = "processor_time_sec"
+ ExporterTimeName = "exporter_time_sec"
+ PipelineRetryCountName = "pipeline_retry_count"
+)
+
+// AllMetricNames is a reference for all the custom metric names.
+var AllMetricNames = []string{
+ BlockImportTimeName,
+ ImportedTxnsPerBlockName,
+ ImportedRoundGaugeName,
+ GetAlgodRawBlockTimeName,
+ ImporterTimeName,
+ ProcessorTimeName,
+ ExporterTimeName,
+ PipelineRetryCountName,
+}
+
+// MetricsCollector queries a /metrics endpoint for prometheus style metrics and saves metrics matching a pattern.
+type MetricsCollector struct {
+ // MetricsURL where metrics can be queried.
+ MetricsURL string
+ // Data is all of the results.
+ Data []Entry
+}
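+
+// A minimal usage sketch (the URL is illustrative):
+//
+//	collector := MetricsCollector{MetricsURL: "http://localhost:9999/metrics"}
+//	err := collector.Collect(AllMetricNames...)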
+
+// Entry is the raw data pulled from the endpoint along with a timestamp.
+type Entry struct {
+ Timestamp time.Time
+ Data []string
+}
+
+// Collect fetches the metrics.
+func (r *MetricsCollector) Collect(substrings ...string) error {
+ metrics, err := r.getMetrics(substrings...)
+ if err != nil {
+ return err
+ }
+
+ if len(metrics) > 0 {
+ entry := Entry{
+ Timestamp: time.Now(),
+ Data: metrics,
+ }
+ r.Data = append(r.Data, entry)
+ }
+
+ return nil
+}
+
+func (r MetricsCollector) getMetrics(substrings ...string) (result []string, err error) {
+ resp, err := http.Get(r.MetricsURL)
+ if err != nil {
+		err = fmt.Errorf("unable to read metrics url '%s': %w", r.MetricsURL, err)
+ return
+ }
+ defer resp.Body.Close()
+
+ scanner := bufio.NewScanner(resp.Body)
+ for scanner.Scan() {
+ str := scanner.Text()
+
+ if strings.HasPrefix(str, "#") {
+ continue
+ }
+
+ for _, substring := range substrings {
+ if strings.Contains(str, substring) {
+ result = append(result, str)
+ break
+ }
+ }
+ }
+
+ if scanner.Err() != nil {
+ err = fmt.Errorf("problem reading metrics response: %w", scanner.Err())
+ }
+
+ return
+}
diff --git a/tools/block-generator/runner/run.go b/tools/block-generator/runner/run.go
new file mode 100644
index 000000000..2e1f970c5
--- /dev/null
+++ b/tools/block-generator/runner/run.go
@@ -0,0 +1,463 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package runner
+
+import (
+ "bytes"
+ "context"
+ // embed conduit template config file
+ _ "embed"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/template"
+ "time"
+
+ "github.com/algorand/go-algorand/tools/block-generator/generator"
+ "github.com/algorand/go-algorand/tools/block-generator/util"
+ "github.com/algorand/go-deadlock"
+)
+
+//go:embed template/conduit.yml.tmpl
+var conduitConfigTmpl string
+
+// Args are all the things needed to run a performance test.
+type Args struct {
+ // Path is a directory when passed to RunBatch, otherwise a file path.
+ Path string
+ ConduitBinary string
+ MetricsPort uint64
+ PostgresConnectionString string
+ CPUProfilePath string
+ RunDuration time.Duration
+ LogLevel string
+ ReportDirectory string
+ ResetReportDir bool
+ RunValidation bool
+ KeepDataDir bool
+ GenesisFile string
+ ResetDB bool
+}
+
+type config struct {
+ LogLevel string
+ LogFile string
+ MetricsPort string
+ AlgodNet string
+ PostgresConnectionString string
+}
+
+// Run is a public helper to run the tests.
+// The test will run against the generator configuration file specified by 'args.Path'.
+// If 'args.Path' is a directory it should contain generator configuration files, a test will run using each file.
+func Run(args Args) error {
+ if _, err := os.Stat(args.ReportDirectory); !os.IsNotExist(err) {
+ if args.ResetReportDir {
+ fmt.Printf("Resetting existing report directory '%s'\n", args.ReportDirectory)
+ if err := os.RemoveAll(args.ReportDirectory); err != nil {
+ return fmt.Errorf("failed to reset report directory: %w", err)
+ }
+ } else {
+ return fmt.Errorf("report directory '%s' already exists", args.ReportDirectory)
+ }
+ }
+ err := os.Mkdir(args.ReportDirectory, os.ModeDir|os.ModePerm)
+ if err != nil {
+ return err
+ }
+
+ defer fmt.Println("Done running tests!")
+	return filepath.Walk(args.Path, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		// Ignore directories
+		if info.IsDir() {
+ return nil
+ }
+ runnerArgs := args
+ runnerArgs.Path = path
+ fmt.Printf("Running test for configuration '%s'\n", path)
+ return runnerArgs.run()
+ })
+}
+
+func (r *Args) run() error {
+ baseName := filepath.Base(r.Path)
+ baseNameNoExt := strings.TrimSuffix(baseName, filepath.Ext(baseName))
+ reportfile := path.Join(r.ReportDirectory, fmt.Sprintf("%s.report", baseNameNoExt))
+ logfile := path.Join(r.ReportDirectory, fmt.Sprintf("%s.conduit-log", baseNameNoExt))
+ dataDir := path.Join(r.ReportDirectory, fmt.Sprintf("%s_data", baseNameNoExt))
+ // create the data directory.
+ if err := os.Mkdir(dataDir, os.ModeDir|os.ModePerm); err != nil {
+ return fmt.Errorf("failed to create data directory: %w", err)
+ }
+ if !r.KeepDataDir {
+ defer os.RemoveAll(dataDir)
+ }
+
+ // This middleware allows us to lock the block endpoint
+ var freezeMutex deadlock.Mutex
+ blockMiddleware := func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ freezeMutex.Lock()
+ defer freezeMutex.Unlock()
+ next.ServeHTTP(w, r)
+ })
+ }
+ // get next db round
+ var nextRound uint64
+ var err error
+ if r.ResetDB {
+ if err = util.EmptyDB(r.PostgresConnectionString); err != nil {
+ return fmt.Errorf("emptyDB err: %w", err)
+ }
+ nextRound = 0
+ } else {
+ nextRound, err = util.GetNextRound(r.PostgresConnectionString)
+		if err == util.ErrorNotInitialized {
+ nextRound = 0
+ } else if err != nil {
+ return fmt.Errorf("getNextRound err: %w", err)
+ }
+ }
+ // Start services
+	algodNet := "localhost:11112"
+ metricsNet := fmt.Sprintf("localhost:%d", r.MetricsPort)
+ generatorShutdownFunc, _ := startGenerator(r.Path, nextRound, r.GenesisFile, algodNet, blockMiddleware)
+ defer func() {
+ // Shutdown generator.
+ if err := generatorShutdownFunc(); err != nil {
+ fmt.Printf("failed to shutdown generator: %s\n", err)
+ }
+ }()
+ // get conduit config template
+ t, err := template.New("conduit").Parse(conduitConfigTmpl)
+ if err != nil {
+ return fmt.Errorf("unable to parse conduit config template: %w", err)
+ }
+
+ // create config file in the right data directory
+ f, err := os.Create(path.Join(dataDir, "conduit.yml"))
+ if err != nil {
+ return fmt.Errorf("problem creating conduit.yml: %w", err)
+ }
+ defer f.Close()
+
+ conduitConfig := config{r.LogLevel, logfile,
+ fmt.Sprintf(":%d", r.MetricsPort),
+ algodNet, r.PostgresConnectionString,
+ }
+
+ err = t.Execute(f, conduitConfig)
+ if err != nil {
+ return fmt.Errorf("problem executing template file: %w", err)
+ }
+
+ // Start conduit
+ conduitShutdownFunc, err := startConduit(dataDir, r.ConduitBinary, nextRound)
+ if err != nil {
+ return fmt.Errorf("failed to start Conduit: %w", err)
+ }
+ defer func() {
+ // Shutdown conduit
+ if err := conduitShutdownFunc(); err != nil {
+ fmt.Printf("failed to shutdown Conduit: %s\n", err)
+ }
+ }()
+
+ // Create the report file
+ report, err := os.Create(reportfile)
+ if err != nil {
+ return fmt.Errorf("unable to create report: %w", err)
+ }
+ defer report.Close()
+
+ // Run the test, collecting results.
+ // check /metrics endpoint is available before running the test
+ var resp *http.Response
+ for retry := 0; retry < 10; retry++ {
+ resp, err = http.Get(fmt.Sprintf("http://%s/metrics", metricsNet))
+ if err == nil {
+ resp.Body.Close()
+ break
+ }
+ time.Sleep(3 * time.Second)
+ }
+ if err != nil {
+ return fmt.Errorf("failed to query metrics endpoint: %w", err)
+ }
+ if err = r.runTest(report, metricsNet, algodNet); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type metricType int
+
+const (
+ rate metricType = iota
+ intTotal
+ floatTotal
+)
+
+// Helper to record metrics. Supports rates (sum/count) and counters.
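+// Output lines look like "early_average_import_time_sec:0.25" (illustrative values).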
+func recordDataToFile(start time.Time, entry Entry, prefix string, out *os.File) error {
+ var writeErrors []string
+ var writeErr error
+ record := func(prefix2, name string, t metricType) {
+ key := fmt.Sprintf("%s%s_%s", prefix, prefix2, name)
+ if err := recordMetricToFile(entry, key, name, t, out); err != nil {
+ writeErr = err
+ writeErrors = append(writeErrors, name)
+ }
+ }
+
+ record("_average", BlockImportTimeName, rate)
+ record("_cumulative", BlockImportTimeName, floatTotal)
+ record("_average", ImportedTxnsPerBlockName, rate)
+ record("_cumulative", ImportedTxnsPerBlockName, intTotal)
+ record("", ImportedRoundGaugeName, intTotal)
+
+ if len(writeErrors) > 0 {
+ return fmt.Errorf("error writing metrics (%s): %w", strings.Join(writeErrors, ", "), writeErr)
+ }
+
+ // Calculate import transactions per second.
+ totalTxn, err := getMetric(entry, ImportedTxnsPerBlockName, false)
+ if err != nil {
+ return err
+ }
+
+ importTimeS, err := getMetric(entry, BlockImportTimeName, false)
+ if err != nil {
+ return err
+ }
+ tps := totalTxn / importTimeS
+ key := "overall_transactions_per_second"
+ msg := fmt.Sprintf("%s_%s:%.2f\n", prefix, key, tps)
+ if _, err := out.WriteString(msg); err != nil {
+ return fmt.Errorf("unable to write metric '%s': %w", key, err)
+ }
+
+ // Uptime
+ key = "uptime_seconds"
+ msg = fmt.Sprintf("%s_%s:%.2f\n", prefix, key, time.Since(start).Seconds())
+ if _, err := out.WriteString(msg); err != nil {
+ return fmt.Errorf("unable to write metric '%s': %w", key, err)
+ }
+
+ return nil
+}
+
+func recordMetricToFile(entry Entry, outputKey, metricSuffix string, t metricType, out *os.File) error {
+ value, err := getMetric(entry, metricSuffix, t == rate)
+ if err != nil {
+ return err
+ }
+
+ var msg string
+ if t == intTotal {
+ msg = fmt.Sprintf("%s:%d\n", outputKey, uint64(value))
+ } else {
+ msg = fmt.Sprintf("%s:%.2f\n", outputKey, value)
+ }
+
+ if _, err := out.WriteString(msg); err != nil {
+ return fmt.Errorf("unable to write metric '%s': %w", outputKey, err)
+ }
+
+ return nil
+}
+
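+// getMetric scans an entry for a metric matching 'suffix'. For rate metrics it
+// returns sum/count from the Prometheus summary lines, e.g. (illustrative):
+//
+//	import_time_sec_sum 12.5
+//	import_time_sec_count 5
+//
+// For non-rate metrics it returns the summary sum when present, otherwise the
+// plain counter or gauge value.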
+func getMetric(entry Entry, suffix string, rateMetric bool) (float64, error) {
+ total := 0.0
+ sum := 0.0
+ count := 0.0
+ hasSum := false
+ hasCount := false
+ hasTotal := false
+
+ for _, metric := range entry.Data {
+ var err error
+
+ if strings.Contains(metric, suffix) {
+ split := strings.Split(metric, " ")
+ if len(split) != 2 {
+ return 0.0, fmt.Errorf("unknown metric format, expected 'key value' received: %s", metric)
+ }
+
+ // Check for _sum / _count for summary (rateMetric) metrics.
+ // Otherwise grab the total value.
+ if strings.HasSuffix(split[0], "_sum") {
+ sum, err = strconv.ParseFloat(split[1], 64)
+ hasSum = true
+ } else if strings.HasSuffix(split[0], "_count") {
+ count, err = strconv.ParseFloat(split[1], 64)
+ hasCount = true
+ } else if strings.HasSuffix(split[0], suffix) {
+ total, err = strconv.ParseFloat(split[1], 64)
+ hasTotal = true
+ }
+
+ if err != nil {
+ return 0.0, fmt.Errorf("unable to parse metric '%s': %w", metric, err)
+ }
+
+ if rateMetric && hasSum && hasCount {
+ return sum / count, nil
+ } else if !rateMetric {
+ if hasSum {
+ return sum, nil
+ }
+ if hasTotal {
+ return total, nil
+ }
+ }
+ }
+ }
+
+ return 0.0, fmt.Errorf("metric incomplete or not found: %s", suffix)
+}
+
+// Run the test for 'RunDuration', collect metrics and write them to the 'ReportDirectory'
+func (r *Args) runTest(report *os.File, metricsURL string, generatorURL string) error {
+ collector := &MetricsCollector{MetricsURL: fmt.Sprintf("http://%s/metrics", metricsURL)}
+
+ // Run for r.RunDuration
+ start := time.Now()
+ count := 1
+ for time.Since(start) < r.RunDuration {
+ time.Sleep(r.RunDuration / 10)
+
+ if err := collector.Collect(AllMetricNames...); err != nil {
+ return fmt.Errorf("problem collecting metrics (%d / %s): %w", count, time.Since(start), err)
+ }
+ count++
+ }
+ if err := collector.Collect(AllMetricNames...); err != nil {
+ return fmt.Errorf("problem collecting final metrics (%d / %s): %w", count, time.Since(start), err)
+ }
+
+ // write scenario to report
+ scenario := path.Base(r.Path)
+ if _, err := report.WriteString(fmt.Sprintf("scenario:%s\n", scenario)); err != nil {
+ return fmt.Errorf("unable to write scenario to report: %w", err)
+ }
+ // Collect results.
+ durationStr := fmt.Sprintf("test_duration_seconds:%d\ntest_duration_actual_seconds:%f\n",
+ uint64(r.RunDuration.Seconds()),
+ time.Since(start).Seconds())
+ if _, err := report.WriteString(durationStr); err != nil {
+ return fmt.Errorf("unable to write duration metric: %w", err)
+ }
+
+ resp, err := http.Get(fmt.Sprintf("http://%s/report", generatorURL))
+ if err != nil {
+		return fmt.Errorf("generator report query failed: %w", err)
+ }
+ defer resp.Body.Close()
+ var generatorReport generator.Report
+ if err = json.NewDecoder(resp.Body).Decode(&generatorReport); err != nil {
+ return fmt.Errorf("problem decoding generator report: %w", err)
+ }
+ for metric, entry := range generatorReport {
+ // Skip this one
+ if metric == "genesis" {
+ continue
+ }
+ str := fmt.Sprintf("transaction_%s_total:%d\n", metric, entry.GenerationCount)
+ if _, err = report.WriteString(str); err != nil {
+ return fmt.Errorf("unable to write transaction_count metric: %w", err)
+ }
+ }
+
+ // Record a rate from one of the first data points.
+ if len(collector.Data) > 5 {
+ if err = recordDataToFile(start, collector.Data[2], "early", report); err != nil {
+ return err
+ }
+ }
+
+ // Also record the final metrics.
+ if err = recordDataToFile(start, collector.Data[len(collector.Data)-1], "final", report); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// startGenerator starts the generator server.
+func startGenerator(configFile string, dbround uint64, genesisFile string, addr string, blockMiddleware func(http.Handler) http.Handler) (func() error, generator.Generator) {
+ // Start generator.
+ server, generator := generator.MakeServerWithMiddleware(dbround, genesisFile, configFile, addr, blockMiddleware)
+
+ // Start the server
+ go func() {
+ // always returns error. ErrServerClosed on graceful close
+ fmt.Printf("generator serving on %s\n", server.Addr)
+ if err := server.ListenAndServe(); err != http.ErrServerClosed {
+ util.MaybeFail(err, "ListenAndServe() failure to start with config file '%s'", configFile)
+ }
+ }()
+
+ return func() error {
+ // stop generator
+ defer generator.Stop()
+ // Shutdown blocks until the server has stopped.
+ if err := server.Shutdown(context.Background()); err != nil {
+ return fmt.Errorf("failed during generator graceful shutdown: %w", err)
+ }
+ return nil
+ }, generator
+}
+
+// startConduit starts the conduit binary.
+func startConduit(dataDir string, conduitBinary string, round uint64) (func() error, error) {
+ cmd := exec.Command(
+ conduitBinary,
+ "-r", strconv.FormatUint(round, 10),
+ "-d", dataDir,
+ )
+
+ var stdout bytes.Buffer
+ var stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ if err := cmd.Start(); err != nil {
+ return nil, fmt.Errorf("failure calling Start(): %w", err)
+ }
+	// Conduit doesn't expose a health check endpoint, so there is no readiness check for now.
+
+ return func() error {
+ if err := cmd.Process.Signal(os.Interrupt); err != nil {
+		fmt.Printf("failed to interrupt conduit process: %s\n", err)
+ if err := cmd.Process.Kill(); err != nil {
+ return fmt.Errorf("failed to kill conduit process: %w", err)
+ }
+ }
+ if err := cmd.Wait(); err != nil {
+ fmt.Printf("ignoring error while waiting for process to stop: %s\n", err)
+ }
+ return nil
+ }, nil
+}
diff --git a/tools/block-generator/runner/runner.go b/tools/block-generator/runner/runner.go
new file mode 100644
index 000000000..32598b924
--- /dev/null
+++ b/tools/block-generator/runner/runner.go
@@ -0,0 +1,63 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package runner
+
+import (
+ "fmt"
+ "math/rand"
+ "time"
+
+ "github.com/spf13/cobra"
+)
+
+// RunnerCmd launches the block-generator test suite runner.
+var RunnerCmd *cobra.Command
+
+func init() {
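+	// Seed math/rand with a constant so random behavior is reproducible across runs.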
+ rand.Seed(12345)
+ var runnerArgs Args
+
+ RunnerCmd = &cobra.Command{
+ Use: "runner",
+ Short: "Run test suite and collect results.",
+ Long: "Run an automated test suite using the block-generator daemon and a provided conduit binary. Results are captured to a specified output directory.",
+ Run: func(cmd *cobra.Command, args []string) {
+ if err := Run(runnerArgs); err != nil {
+ fmt.Println(err)
+ }
+ },
+ }
+
+ RunnerCmd.Flags().StringVarP(&runnerArgs.Path, "scenario", "s", "", "Directory containing scenarios, or specific scenario file.")
+ RunnerCmd.Flags().StringVarP(&runnerArgs.ConduitBinary, "conduit-binary", "i", "", "Path to conduit binary.")
+ RunnerCmd.Flags().Uint64VarP(&runnerArgs.MetricsPort, "metrics-port", "p", 9999, "Port to start the metrics server at.")
+ RunnerCmd.Flags().StringVarP(&runnerArgs.PostgresConnectionString, "postgres-connection-string", "c", "", "Postgres connection string.")
+ RunnerCmd.Flags().DurationVarP(&runnerArgs.RunDuration, "test-duration", "d", 5*time.Minute, "Duration to use for each scenario.")
+ RunnerCmd.Flags().StringVarP(&runnerArgs.ReportDirectory, "report-directory", "r", "", "Location to place test reports.")
+ RunnerCmd.Flags().StringVarP(&runnerArgs.LogLevel, "log-level", "l", "error", "LogLevel to use when starting Conduit. [panic, fatal, error, warn, info, debug, trace]")
+ RunnerCmd.Flags().StringVarP(&runnerArgs.CPUProfilePath, "cpuprofile", "", "", "Path where Conduit writes its CPU profile.")
+ RunnerCmd.Flags().BoolVarP(&runnerArgs.ResetReportDir, "reset-report-dir", "", false, "If set any existing report directory will be deleted before running tests.")
+ RunnerCmd.Flags().BoolVarP(&runnerArgs.RunValidation, "validate", "", false, "If set the validator will run after test-duration has elapsed to verify data is correct. An extra line in each report indicates validator success or failure.")
+ RunnerCmd.Flags().BoolVarP(&runnerArgs.KeepDataDir, "keep-data-dir", "k", false, "If set the validator will not delete the data directory after tests complete.")
+ RunnerCmd.Flags().StringVarP(&runnerArgs.GenesisFile, "genesis-file", "f", "", "file path to the genesis associated with the db snapshot")
+ RunnerCmd.Flags().BoolVarP(&runnerArgs.ResetDB, "reset-db", "", false, "If set database will be deleted before running tests.")
+
+ RunnerCmd.MarkFlagRequired("scenario")
+ RunnerCmd.MarkFlagRequired("conduit-binary")
+ RunnerCmd.MarkFlagRequired("postgres-connection-string")
+ RunnerCmd.MarkFlagRequired("report-directory")
+}
diff --git a/tools/block-generator/runner/template/conduit.yml.tmpl b/tools/block-generator/runner/template/conduit.yml.tmpl
new file mode 100644
index 000000000..c361426ee
--- /dev/null
+++ b/tools/block-generator/runner/template/conduit.yml.tmpl
@@ -0,0 +1,61 @@
+# Log verbosity: PANIC, FATAL, ERROR, WARN, INFO, DEBUG, TRACE
+log-level: {{.LogLevel}}
+
+# If no log file is provided logs are written to stdout.
+log-file: {{.LogFile}}
+
+# Number of retries to perform after a pipeline plugin error.
+retry-count: 10
+
+# Time duration to wait between retry attempts.
+retry-delay: "1s"
+
+# Optional filepath to use for pidfile.
+#pid-filepath: /path/to/pidfile
+
+# Whether or not to print the conduit banner on startup.
+hide-banner: false
+
+# When enabled, Prometheus metrics are available on '/metrics'.
+metrics:
+ mode: ON
+ addr: "{{.MetricsPort}}"
+ prefix: "conduit"
+
+# The importer is typically an algod follower node.
+importer:
+ name: algod
+ config:
+ # The mode of operation, either "archival" or "follower".
+ # * archival mode allows you to start processing on any round but does not
+ # contain the ledger state delta objects required for the postgres writer.
+ # * follower mode allows you to use a lightweight non-archival node as the
+ # data source. In addition, it will provide ledger state delta objects to
+ # the processors and exporter.
+ mode: "follower"
+
+ # Algod API address.
+ netaddr: "{{.AlgodNet}}"
+
+ # Algod API token.
+ token: ""
+
+
+# Zero or more processors may be defined to manipulate what data
+# reaches the exporter.
+processors:
+
+# An exporter is defined to do something with the data.
+exporter:
+ name: postgresql
+ config:
+ # Pgsql connection string
+ # See https://github.com/jackc/pgconn for more details
+ connection-string: "{{ .PostgresConnectionString }}"
+
+      # Maximum number of connections in the connection pool.
+      # This caps the total number of queries that can run concurrently.
+ max-conn: 20
+
+
diff --git a/tools/block-generator/scenarios/config.asset.close.yml b/tools/block-generator/scenarios/config.asset.close.yml
new file mode 100644
index 000000000..9a8b86bf5
--- /dev/null
+++ b/tools/block-generator/scenarios/config.asset.close.yml
@@ -0,0 +1,16 @@
+name: "Asset Close"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 5000
+
+# transaction distribution
+tx_asset_fraction: 1.0
+
+# asset config
+asset_create_fraction: 0.001
+asset_optin_fraction: 0.50
+asset_close_fraction: 0.40
+asset_xfer_fraction: 0.099
+
+# pay config fractions must sum to 1.0
+pay_xfer_fraction: 1.0
diff --git a/tools/block-generator/scenarios/config.asset.destroy.yml b/tools/block-generator/scenarios/config.asset.destroy.yml
new file mode 100644
index 000000000..953572efb
--- /dev/null
+++ b/tools/block-generator/scenarios/config.asset.destroy.yml
@@ -0,0 +1,16 @@
+name: "Asset Destroy"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 5000
+
+# transaction distribution
+tx_asset_fraction: 1.0
+
+# asset config
+asset_create_fraction: 0.8
+asset_destroy_fraction: 0.2
+asset_optin_fraction: 0.0
+asset_close_fraction: 0.0
+
+# pay config fractions must sum to 1.0
+pay_xfer_fraction: 1.0
diff --git a/tools/block-generator/scenarios/config.asset.xfer.yml b/tools/block-generator/scenarios/config.asset.xfer.yml
new file mode 100644
index 000000000..3e6643ba3
--- /dev/null
+++ b/tools/block-generator/scenarios/config.asset.xfer.yml
@@ -0,0 +1,15 @@
+name: "Asset Xfer"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 5000
+
+# transaction distribution
+tx_asset_fraction: 1.0
+
+# asset config
+asset_create_fraction: 0.001
+asset_optin_fraction: 0.10
+asset_xfer_fraction: 0.899
+
+# pay config fractions must sum to 1.0
+pay_xfer_fraction: 1.0
diff --git a/tools/block-generator/scenarios/config.mixed.jumbo.yml b/tools/block-generator/scenarios/config.mixed.jumbo.yml
new file mode 100644
index 000000000..630ab8dee
--- /dev/null
+++ b/tools/block-generator/scenarios/config.mixed.jumbo.yml
@@ -0,0 +1,19 @@
+name: "Mixed (jumbo)"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 25000
+
+# transaction distribution
+tx_pay_fraction: 0.3
+tx_asset_fraction: 0.7
+
+# payment config
+pay_acct_create_fraction: 0.02
+pay_xfer_fraction: 0.98
+
+# asset config
+asset_create_fraction: 0.001
+asset_optin_fraction: 0.1
+asset_close_fraction: 0.05
+asset_xfer_fraction: 0.849
+asset_delete_fraction: 0
diff --git a/tools/block-generator/scenarios/config.mixed.yml b/tools/block-generator/scenarios/config.mixed.yml
new file mode 100644
index 000000000..d6e1eea60
--- /dev/null
+++ b/tools/block-generator/scenarios/config.mixed.yml
@@ -0,0 +1,19 @@
+name: "Mixed"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 5000
+
+# transaction distribution
+tx_pay_fraction: 0.3
+tx_asset_fraction: 0.7
+
+# payment config
+pay_acct_create_fraction: 0.02
+pay_xfer_fraction: 0.98
+
+# asset config
+asset_create_fraction: 0.001
+asset_optin_fraction: 0.1
+asset_close_fraction: 0.05
+asset_xfer_fraction: 0.849
+asset_delete_fraction: 0
diff --git a/tools/block-generator/scenarios/config.payment.full.yml b/tools/block-generator/scenarios/config.payment.full.yml
new file mode 100644
index 000000000..8143c22bf
--- /dev/null
+++ b/tools/block-generator/scenarios/config.payment.full.yml
@@ -0,0 +1,14 @@
+name: "Pay (full)"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 5000
+
+# transaction distribution
+tx_pay_fraction: 1.0
+
+# payment config
+pay_acct_create_fraction: 0.02
+pay_xfer_fraction: 0.98
+
+# asset config fractions must sum to 1.0
+asset_create_fraction: 1.0
diff --git a/tools/block-generator/scenarios/config.payment.jumbo.yml b/tools/block-generator/scenarios/config.payment.jumbo.yml
new file mode 100644
index 000000000..23f0a92d6
--- /dev/null
+++ b/tools/block-generator/scenarios/config.payment.jumbo.yml
@@ -0,0 +1,14 @@
+name: "Pay (jumbo)"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 25000
+
+# transaction distribution
+tx_pay_fraction: 1.0
+
+# payment config
+pay_acct_create_fraction: 0.02
+pay_xfer_fraction: 0.98
+
+# asset config fractions must sum to 1.0
+asset_create_fraction: 1.0
diff --git a/tools/block-generator/scenarios/config.payment.small.yml b/tools/block-generator/scenarios/config.payment.small.yml
new file mode 100644
index 000000000..7f01232db
--- /dev/null
+++ b/tools/block-generator/scenarios/config.payment.small.yml
@@ -0,0 +1,14 @@
+name: "Pay (small)"
+genesis_accounts: 10000
+genesis_account_balance: 1000000000000
+tx_per_block: 100
+
+# transaction distribution
+tx_pay_fraction: 1.0
+
+# payment config
+pay_acct_create_fraction: 0.02
+pay_xfer_fraction: 0.98
+
+# asset config fractions must sum to 1.0
+asset_create_fraction: 1.0
diff --git a/tools/block-generator/test_config.yml b/tools/block-generator/test_config.yml
new file mode 100644
index 000000000..6d411e9ad
--- /dev/null
+++ b/tools/block-generator/test_config.yml
@@ -0,0 +1,23 @@
+name: Test Config
+# genesis accounts
+genesis_accounts: 10
+genesis_account_balance: 40000000000000
+
+tx_per_block: 10
+
+# transaction distribution
+tx_pay_fraction: 0.5
+tx_asset_fraction: 0.5
+
+# payment config
+pay_acct_create_fraction: 0.5
+pay_xfer_fraction: 0.5
+
+# asset distribution
+asset_create_fraction: 0.25
+asset_optin_fraction: 0.25
+asset_close_fraction: 0.25
+asset_xfer_fraction: 0.25
+asset_destroy_fraction: 0.0
+
+
diff --git a/tools/block-generator/upload_metrics.py b/tools/block-generator/upload_metrics.py
new file mode 100644
index 000000000..f623a0c55
--- /dev/null
+++ b/tools/block-generator/upload_metrics.py
@@ -0,0 +1,53 @@
+from datadog import initialize
+from datadog import api
+import os
+import argparse
+
+parser = argparse.ArgumentParser(description="Upload performance metrics to Datadog")
+parser.add_argument(
+ "-f",
+ "--perf-reports",
+ required=True,
+ action="store",
+ dest="files",
+ type=str,
+ nargs="*",
+ help="list of reports created by the block generator",
+)
+parser.add_argument(
+ "-c",
+ "--binary-version",
+ required=True,
+ help="Release version or the commit hash of the Conduit binary used during the performance test",
+)
+args = parser.parse_args()
+
+
+def parse_report(report):
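+    # Each line of the report is "tag:value", e.g. (illustrative):
+    #   final_overall_transactions_per_second:1234.56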
+ data = dict()
+ with open(report) as f:
+ for line in f:
+ tag, value = line.split(":")
+ data[tag] = value if tag == "scenario" else float(value)
+ return data
+
+
+if __name__ == "__main__":
+ print("initializing datadog")
+ options = {
+ "api_key": os.getenv("DATADOG_API_KEY"),
+ "app_key": os.getenv("DATADOG_APP_KEY"),
+ }
+ initialize(**options)
+ for fp in args.files:
+ print(f"uploading metrics for {fp}")
+ data = parse_report(fp)
+ tags = [
+ f"conduit_version:{args.binary_version}",
+ f'duration:{data["test_duration_seconds"]}s',
+ f'scenario:{data["scenario"]}',
+ ]
+ transactionsPerBlockAvgMetricName = "conduit.perf.transactions_per_second"
+ tps = data["final_overall_transactions_per_second"]
+ api.Metric.send(metric=transactionsPerBlockAvgMetricName, points=tps, tags=tags)
+ print("uploaded metrics")
diff --git a/tools/block-generator/util/util.go b/tools/block-generator/util/util.go
new file mode 100644
index 000000000..b36fca9ee
--- /dev/null
+++ b/tools/block-generator/util/util.go
@@ -0,0 +1,78 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package util
+
+import (
+ "database/sql"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ // import postgres driver
+ _ "github.com/lib/pq"
+)
+
+// ErrorNotInitialized is returned when the database is not initialized.
+var ErrorNotInitialized error = errors.New("database not initialized")
+
+// MaybeFail exits if there was an error.
+func MaybeFail(err error, errfmt string, params ...interface{}) {
+ if err == nil {
+ return
+ }
+ fmt.Fprintf(os.Stderr, errfmt, params...)
+ fmt.Fprintf(os.Stderr, "\nError: %v\n", err)
+ os.Exit(1)
+}
+
+// GetNextRound returns the next account round from the metastate table.
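+// The stored 'state' value is JSON of the form {"next_account_round": N}.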
+func GetNextRound(postgresConnectionString string) (uint64, error) {
+ conn, err := sql.Open("postgres", postgresConnectionString)
+ if err != nil {
+ return 0, fmt.Errorf("postgres connection string did not work: %w", err)
+ }
+ defer conn.Close()
+ query := `SELECT v FROM metastate WHERE k='state';`
+ var state []uint8
+ if err = conn.QueryRow(query).Scan(&state); err != nil {
+ if strings.Contains(err.Error(), `relation "metastate" does not exist`) {
+ return 0, ErrorNotInitialized
+ }
+ return 0, fmt.Errorf("unable to get next db round: %w", err)
+ }
+ kv := make(map[string]uint64)
+ err = json.Unmarshal(state, &kv)
+ if err != nil {
+ return 0, fmt.Errorf("unable to get next account round: %w", err)
+ }
+ return kv["next_account_round"], nil
+}
+
+// EmptyDB empties the database.
+func EmptyDB(postgresConnectionString string) error {
+ conn, err := sql.Open("postgres", postgresConnectionString)
+ if err != nil {
+ return fmt.Errorf("postgres connection string did not work: %w", err)
+ }
+ defer conn.Close()
+ query := `DROP SCHEMA public CASCADE; CREATE SCHEMA public;`
+ if _, err = conn.Exec(query); err != nil {
+ return fmt.Errorf("unable to reset postgres DB: %w", err)
+ }
+ return nil
+}
diff --git a/tools/debug/chopper/main.go b/tools/debug/chopper/main.go
new file mode 100644
index 000000000..5e70292f0
--- /dev/null
+++ b/tools/debug/chopper/main.go
@@ -0,0 +1,230 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+// chopper compares raw Algorand logs for matching catchpoint (balance trie) roots and labels
+package main
+
+import (
+ "bufio"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/fatih/color"
+ "golang.org/x/text/cases"
+ "golang.org/x/text/language"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/logging/telemetryspec"
+)
+
+const (
+ red = color.FgRed
+ green = color.FgGreen
+ yellow = color.FgYellow
+)
+
+var help = flag.Bool("help", false, "Show help")
+var helpShort = flag.Bool("h", false, "Show help")
+var labels = flag.Bool("labels", false, "Compare catchpoint labels in addition to roots")
+var labelsShort = flag.Bool("l", false, "Compare catchpoint labels in addition to roots")
+
+func usage() {
+ fmt.Fprintln(os.Stderr, `Utility to extract and compare balance root and catchpoint labels messages from algod log files (node.log)
+Usage: ./chopper [--labels] file1 file2`)
+}
+
+// logEntry is json representing catchpoint root message telemetry
+type logEntry struct {
+ Details telemetryspec.CatchpointRootUpdateEventDetails
+}
+
+// rootLabelInfo is parsed roots/labels from a log file
+type rootLabelInfo struct {
+ roots map[basics.Round]*telemetryspec.CatchpointRootUpdateEventDetails
+ labels map[basics.Round]string
+}
+
+// extractEntries reads the log file line by line and collects root and labels entries
+func extractEntries(filename string, checkLabels bool) rootLabelInfo {
+ f, err := os.Open(filename)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error opening %s: %s\n", filename, err.Error())
+ os.Exit(1)
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+
+ var re *regexp.Regexp
+ if checkLabels {
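+		// Matches log lines like (illustrative):
+		//   Creating a catchpoint label 1000#ABCDEFG for round=1000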
+ re = regexp.MustCompile(`Creating a catchpoint label (\d+#[A-Z0-9]+)\s+for round=(\d+).*`)
+ }
+
+ result := rootLabelInfo{
+ roots: make(map[basics.Round]*telemetryspec.CatchpointRootUpdateEventDetails),
+ }
+ if checkLabels {
+ result.labels = make(map[basics.Round]string)
+ }
+
+ for s.Scan() {
+ line := s.Text()
+		if len(line) >= 20 && line[0] == '{' && strings.Contains(line[:20], "Root") {
+ var entry logEntry
+ if err := json.Unmarshal([]byte(line), &entry); err != nil {
+ fmt.Fprintf(os.Stderr, "Error reading catchpoint root entry %s: %s\n", filename, err.Error())
+ continue
+ }
+ result.roots[basics.Round(entry.Details.NewBase)] = &entry.Details
+ } else if checkLabels && strings.HasPrefix(line, `{"file":"catchpointlabel.go"`) {
+ entry := map[string]interface{}{}
+ if err := json.Unmarshal([]byte(line), &entry); err != nil {
+ fmt.Fprintf(os.Stderr, "Error reading catchpoint label entry %s: %s\n", filename, err.Error())
+ continue
+ }
+ matches := re.FindStringSubmatch(entry["msg"].(string))
+ if len(matches) != 3 {
+ fmt.Fprintf(os.Stderr, "No catchpoint label match %s: %s %s\n", filename, matches, entry["msg"])
+ continue
+ }
+ uintRound, err := strconv.ParseUint(matches[2], 10, 64)
+ if err != nil {
+				fmt.Fprintf(os.Stderr, "Cannot parse round %s: %s\n", filename, matches[2])
+ continue
+ }
+ result.labels[basics.Round(uintRound)] = matches[1]
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ fmt.Fprintf(os.Stderr, "Error reading lines from %s: %s\n", filename, err.Error())
+ os.Exit(1)
+ }
+
+ return result
+}
+
+type reportData struct {
+ what string
+ size1 int
+ size2 int
+ matched int
+ mismatched []interface{}
+ errReporter func(interface{})
+}
+
+// report prints out stats about matched and mismatched roots or labels
+func report(rd reportData) {
+ fmt.Printf("%s in first: %d, second: %d\n", cases.Title(language.English).String(rd.what), rd.size1, rd.size2)
+
+ const matchedStr = "Matched %s: %d"
+ c := yellow
+ if rd.matched > 0 {
+ c = green
+ }
+ fmt.Println(color.New(c).Sprintf(matchedStr, rd.what, rd.matched))
+
+ const mismatchedStr = "Mismatched %s: %d"
+ c = green
+ if len(rd.mismatched) > 0 {
+ c = red
+ }
+ fmt.Println(color.New(c).Sprintf(mismatchedStr, rd.what, len(rd.mismatched)))
+ if len(rd.mismatched) > 0 {
+ for _, entry := range rd.mismatched {
+ rd.errReporter(entry)
+ }
+ }
+ fmt.Printf("Other %s in first: %d, second: %d\n", rd.what, rd.size1-rd.matched-len(rd.mismatched), rd.size2-rd.matched-len(rd.mismatched))
+}
+
+func main() {
+ flag.Parse()
+
+ if *help || *helpShort || len(flag.Args()) < 2 {
+ usage()
+ os.Exit(1)
+ }
+
+ checkLabels := *labels || *labelsShort
+
+ file1 := flag.Args()[0]
+ file2 := flag.Args()[1]
+
+ // load data
+ info1 := extractEntries(file1, checkLabels)
+ info2 := extractEntries(file2, checkLabels)
+
+ // match roots
+ matchedRoots := 0
+ var mismatchedRoots []interface{}
+ for rnd, tree1 := range info1.roots {
+ if tree2, ok := info2.roots[rnd]; ok {
+ if tree1.Root == tree2.Root {
+ matchedRoots++
+ } else {
+ mismatchedRoots = append(mismatchedRoots, [2]*telemetryspec.CatchpointRootUpdateEventDetails{tree1, tree2})
+ }
+ }
+ }
+
+ // match labels
+ matchedLabels := 0
+ var mismatchedLabels []interface{}
+ if checkLabels {
+ for rnd, label1 := range info1.labels {
+ if label2, ok := info2.labels[rnd]; ok {
+ if label1 == label2 {
+ matchedLabels++
+ } else {
+ mismatchedLabels = append(mismatchedLabels, [2]string{label1, label2})
+ }
+ }
+ }
+
+ }
+
+ report(reportData{
+ what: "roots",
+ size1: len(info1.roots),
+ size2: len(info2.roots),
+ matched: matchedRoots,
+ mismatched: mismatchedRoots,
+ errReporter: func(e interface{}) {
+ entry := e.([2]*telemetryspec.CatchpointRootUpdateEventDetails)
+			fmt.Printf("NewBase: %d, first: (%d, %s), second: (%d, %s)\n", entry[0].NewBase, entry[0].OldBase, entry[0].Root, entry[1].OldBase, entry[1].Root)
+ },
+ })
+
+ if checkLabels {
+ report(reportData{
+ what: "labels",
+ size1: len(info1.labels),
+ size2: len(info2.labels),
+ matched: matchedLabels,
+ mismatched: mismatchedLabels,
+ errReporter: func(e interface{}) {
+ entry := e.([2]string)
+ fmt.Printf("first: %s != %s second\n", entry[0], entry[1])
+ },
+ })
+ }
+}
diff --git a/tools/debug/dumpblocks/main.go b/tools/debug/dumpblocks/main.go
index 461e7949c..a932d9e60 100644
--- a/tools/debug/dumpblocks/main.go
+++ b/tools/debug/dumpblocks/main.go
@@ -54,7 +54,7 @@ func main() {
fmt.Println("-blockdb=file required")
usage()
}
- uri := fmt.Sprintf("file:%s?mode=ro", *blockDBfile)
+ uri := fmt.Sprintf("file:%s?_journal_mode=wal", *blockDBfile)
fmt.Println("Opening", uri)
db, err := sql.Open("sqlite3", uri)
if err != nil {
diff --git a/tools/debug/jslog b/tools/debug/jslog
index 540ebb66c..fe27719f0 100755
--- a/tools/debug/jslog
+++ b/tools/debug/jslog
@@ -96,7 +96,7 @@ class LogFile:
return None
if when:
- when = datetime.datetime.strptime(when, '%Y-%m-%dT%H:%M:%S%z')
+ when = datetime.datetime.strptime(when, '%Y-%m-%dT%H:%M:%S.%f%z')
now = time.time()
dt = when.timestamp() - now
# TODO: format sub-second if available
diff --git a/tools/x-repo-types/Makefile b/tools/x-repo-types/Makefile
new file mode 100644
index 000000000..05094a848
--- /dev/null
+++ b/tools/x-repo-types/Makefile
@@ -0,0 +1,58 @@
+all: goal-v-sdk goal-v-spv
+
+# go-algorand vs go-algorand-sdk:
+
+goal-v-sdk: goal-v-sdk-state-delta goal-v-sdk-genesis goal-v-sdk-block goal-v-sdk-blockheader goal-v-sdk-stateproof
+
+goal-v-sdk-state-delta:
+ x-repo-types --x-package "github.com/algorand/go-algorand/ledger/ledgercore" \
+ --x-type "StateDelta" \
+ --y-branch "develop" \
+ --y-package "github.com/algorand/go-algorand-sdk/v2/types" \
+ --y-type "LedgerStateDelta"
+
+goal-v-sdk-genesis:
+ x-repo-types --x-package "github.com/algorand/go-algorand/data/bookkeeping" \
+ --x-type "Genesis" \
+ --y-branch "develop" \
+ --y-package "github.com/algorand/go-algorand-sdk/v2/types" \
+ --y-type "Genesis"
+
+goal-v-sdk-block:
+ x-repo-types --x-package "github.com/algorand/go-algorand/data/bookkeeping" \
+ --x-type "Block" \
+ --y-branch "develop" \
+ --y-package "github.com/algorand/go-algorand-sdk/v2/types" \
+ --y-type "Block"
+
+goal-v-sdk-blockheader:
+ x-repo-types --x-package "github.com/algorand/go-algorand/data/bookkeeping" \
+ --x-type "BlockHeader" \
+ --y-branch "develop" \
+ --y-package "github.com/algorand/go-algorand-sdk/v2/types" \
+ --y-type "BlockHeader"
+
+goal-v-sdk-stateproof:
+ x-repo-types --x-package "github.com/algorand/go-algorand/crypto/stateproof" \
+ --x-type "StateProof" \
+ --y-branch "develop" \
+ --y-package "github.com/algorand/go-algorand-sdk/v2/types" \
+ --y-type "StateProof"
+
+# go-algorand vs go-stateproof-verification:
+
+goal-v-spv: goal-v-spv-stateproof
+
+goal-v-spv-stateproof:
+ x-repo-types --x-package "github.com/algorand/go-algorand/crypto/stateproof" \
+ --x-type "StateProof" \
+ --y-package "github.com/algorand/go-stateproof-verification/stateproof" \
+ --y-type "StateProof"
+
+# reset typeAnalyzer/main.go for passing checks:
+
+reset-dummy-main:
+ x-repo-types --x-package "github.com/algorand/go-algorand/ledger/ledgercore" \
+ --x-type "StateDelta" \
+ --y-package "github.com/algorand/go-algorand/data/bookkeeping" \
+ --y-type "Genesis"
diff --git a/tools/x-repo-types/README.md b/tools/x-repo-types/README.md
new file mode 100644
index 000000000..97e7706e7
--- /dev/null
+++ b/tools/x-repo-types/README.md
@@ -0,0 +1,52 @@
+# Cross Repo Type Comparisons
+
+Given two types **X** and **Y** from separate repositories, compare the types and generate a report of any differences in the serialized shape of the types. In particular, the comparison ignores different embedding of structs, different field names when `codec` tags are used, and different types when they map to the same primitives.
+This tool is designed to be used in CI systems to alert us when a change is made to one repo without a corresponding change to another, for example the `Genesis` type in `go-algorand` and `go-algorand-sdk`. See the [Makefile](./Makefile) for additional examples.
+
+## Example run
+
+```make
+goal-v-sdk-state-delta-xrt:
+ x-repo-types --x-package "github.com/algorand/go-algorand/ledger/ledgercore" \
+ --x-type "StateDelta" \
+ --y-branch "develop" \
+ --y-package "github.com/algorand/go-algorand-sdk/v2/types" \
+ --y-type "LedgerStateDelta"
+```
+
+## Pseudocode
+
+### Cross Type Comparison Process
+
+1. Inside `tools/x-repo-types`, run the command `x-repo-types --x-package X_PACKAGE_NAME ...`
+2. `x-repo-types` then does the following:
+ 1. `go get`'s the package
+ 2. Populates the template `typeAnalyzer/main.tmpl` with comparison types
+ 3. Saves it in `typeAnalyzer/main.go`
+ 4. Executes it
+3. `typeAnalyzer/main.go` runs the logic defined in `typeAnalyzer/typeAnalyzer.go`:
+    1. Builds up each type's "Type Tree" using reflection
+    2. Compares the trees using the rules outlined below
+4. If the template reports a non-empty diff, `x-repo-types` exits with an error
+
+### Type Tree Comparison
+
+`func StructDiff(x, y interface{}, exclusions map[string]bool) (TypeNode, TypeNode, *Diff, error)` in `typeAnalyzer/typeAnalyzer.go` implements the following recursive notion of _identical_ types:
+
+* if **X** and **Y** are native simple types (`int`, `uint64`, `string`, ...), they are _identical_ IFF they are the same type
+* if both **X** and **Y** are compound types (`struct`, slice, `map`, ...) with each of their child types being _identical_ and with _equivalent serialization metadata_, then they are _identical_
+ * _equivalent serialization metadata_ definition:
+ * for non-structs: there is no metadata so the metadata are _trivially_ identical
+ * for structs:
+ * the keys will encode to the same name
+ * omission of values based on zeroness, etc. will happen in the same way for both structs
+ * embedded structs will be flattened
+
+* ELSE: they are **not** _identical_
+
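+A minimal sketch of the recursion under these rules (the `identical` helper and
+its simplifications are illustrative, not the actual `typeAnalyzer` code):
+
+```go
+package main
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// identical reports whether two types have the same serialized shape.
+// The real implementation also honors codec tags, flattens embedded
+// structs, and compares array lengths; this sketch omits all of that.
+func identical(x, y reflect.Type) bool {
+	if x.Kind() != y.Kind() {
+		return false
+	}
+	switch x.Kind() {
+	case reflect.Slice, reflect.Array, reflect.Ptr:
+		return identical(x.Elem(), y.Elem())
+	case reflect.Map:
+		return identical(x.Key(), y.Key()) && identical(x.Elem(), y.Elem())
+	case reflect.Struct:
+		if x.NumField() != y.NumField() {
+			return false
+		}
+		for i := 0; i < x.NumField(); i++ {
+			if !identical(x.Field(i).Type, y.Field(i).Type) {
+				return false
+			}
+		}
+		return true
+	default:
+		// native simple types are identical IFF they share the same kind
+		return true
+	}
+}
+
+func main() {
+	type a struct{ N uint64 }
+	type b struct{ M uint64 }
+	// Different field names, same serialized shape.
+	fmt.Println(identical(reflect.TypeOf(a{}), reflect.TypeOf(b{}))) // true
+}
+```
+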
+### Exceptional cases
+
+There are some cases that break the definition above. For example, `basics.MicroAlgos` is a struct in
+`go-algorand` but is an alias for `uint64` in `go-algorand-sdk`. Our serializers know to produce the same
+output, but this violates the previous notion of _identical_. Such exceptions are handled by providing the string produced by the type's `TypeNode.String()` method
+as an element in the set `diffExclusions` of `typeAnalyzer/typeAnalyzer.go`.
diff --git a/tools/x-repo-types/go.mod b/tools/x-repo-types/go.mod
new file mode 100644
index 000000000..0fba7a8da
--- /dev/null
+++ b/tools/x-repo-types/go.mod
@@ -0,0 +1,20 @@
+module github.com/algorand/go-algorand/tools/x-repo-types
+
+go 1.17
+
+replace github.com/algorand/go-algorand => ../..
+
+require (
+ github.com/algorand/go-algorand v0.0.0-20230502140608-e24a35add0bb
+ github.com/spf13/cobra v1.7.0
+ github.com/stretchr/testify v1.8.2
+)
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/kr/text v0.2.0 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/tools/x-repo-types/go.sum b/tools/x-repo-types/go.sum
new file mode 100644
index 000000000..dba66ee26
--- /dev/null
+++ b/tools/x-repo-types/go.sum
@@ -0,0 +1,29 @@
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/tools/x-repo-types/typeAnalyzer/main.go b/tools/x-repo-types/typeAnalyzer/main.go
new file mode 100644
index 000000000..f5225a54a
--- /dev/null
+++ b/tools/x-repo-types/typeAnalyzer/main.go
@@ -0,0 +1,85 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+/*
+ WARNING
+ THIS FILE ONLY EXISTS FOR DEBUGGING AND TO MAKE THE BUILD HAPPY
+ !!!!! IT IS OVERWRITTEN AT RUNTIME !!!!!
+*/
+
+package main
+
+import (
+ "fmt"
+ "os"
+
+ xpkg "net/http"
+ ypkg "time"
+)
+
+func main() {
+ xRoot := MakeType(xpkg.Request{})
+ yRoot := MakeType(ypkg.Time{})
+
+ // ---- BUILD ---- //
+ x, y := xRoot.Type, yRoot.Type
+
+ fmt.Printf("Build the Type Tree for %s\n\n", &xRoot)
+ xCycle := xRoot.Build()
+ xTgt := Target{ChildName{Name: fmt.Sprintf("%q", x)}, xRoot}
+
+ fmt.Printf("Build the Type Tree for %s\n\n", &yRoot)
+ yCycle := yRoot.Build()
+ yTgt := Target{ChildName{Name: fmt.Sprintf("%q", y)}, yRoot}
+
+ fmt.Printf("Potential CYCLE in %s:\n%s\n\n", &xRoot, xCycle)
+ fmt.Printf("Potential CYCLE in %s:\n%s\n\n", &yRoot, yCycle)
+
+ // ---- DEBUG ---- //
+
+ /*
+ xRoot.Print()
+ fmt.Printf("\n\nSerialization Tree of %q\n\n", x)
+ xTgt.PrintSerializable()
+
+ yRoot.Print()
+ fmt.Printf("\n\nSerialization Tree of %q\n\n", y)
+ yTgt.PrintSerializable()
+ */
+
+ // ---- STATS ---- //
+
+ LeafStatsReport(xTgt)
+ LeafStatsReport(yTgt)
+
+ MaxDepthReport(xTgt)
+ MaxDepthReport(yTgt)
+
+ // ---- DIFF ---- //
+
+ fmt.Printf("\n\nCompare the Type Trees %q v %q\n", x, y)
+ xType, yType, diff, err := StructDiff(xpkg.Request{}, ypkg.Time{}, diffExclusions)
+ if err != nil {
+ fmt.Printf("Error: %s\n", err)
+ os.Exit(1)
+ }
+ fmt.Println(Report(xType, yType, diff))
+
+ if !diff.Empty() {
+ // signal that this "test" has failed
+ os.Exit(1)
+ }
+}
diff --git a/tools/x-repo-types/typeAnalyzer/main.tmpl b/tools/x-repo-types/typeAnalyzer/main.tmpl
new file mode 100644
index 000000000..37ec0699b
--- /dev/null
+++ b/tools/x-repo-types/typeAnalyzer/main.tmpl
@@ -0,0 +1,79 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "fmt"
+ "os"
+
+ xpkg "{{.XModulePath}}/{{.XPackagePath}}"
+ ypkg "{{.YModulePath}}/{{.YPackagePath}}"
+)
+
+func main() {
+ xRoot := MakeType(xpkg.{{.XTypeInstance}}{})
+ yRoot := MakeType(ypkg.{{.YTypeInstance}}{})
+
+ // ---- BUILD ---- //
+ x, y := xRoot.Type, yRoot.Type
+
+ fmt.Printf("Build the Type Tree for %s\n\n", &xRoot)
+ xCycle := xRoot.Build()
+ xTgt := Target{ChildName{Name: fmt.Sprintf("%q", x)}, xRoot}
+
+ fmt.Printf("Build the Type Tree for %s\n\n", &yRoot)
+ yCycle := yRoot.Build()
+ yTgt := Target{ChildName{Name: fmt.Sprintf("%q", y)}, yRoot}
+
+ fmt.Printf("Potential CYCLE in %s:\n%s\n\n", &xRoot, xCycle)
+ fmt.Printf("Potential CYCLE in %s:\n%s\n\n", &yRoot, yCycle)
+
+ // ---- DEBUG ---- //
+
+ /*
+ xRoot.Print()
+ fmt.Printf("\n\nSerialization Tree of %q\n\n", x)
+ xTgt.PrintSerializable()
+
+ yRoot.Print()
+ fmt.Printf("\n\nSerialization Tree of %q\n\n", y)
+ yTgt.PrintSerializable()
+ */
+
+ // ---- STATS ---- //
+
+ LeafStatsReport(xTgt)
+ LeafStatsReport(yTgt)
+
+ MaxDepthReport(xTgt)
+ MaxDepthReport(yTgt)
+
+ // ---- DIFF ---- //
+
+ fmt.Printf("\n\nCompare the Type Trees %q v %q\n", x, y)
+ xType, yType, diff, err := StructDiff(xpkg.{{.XTypeInstance}}{}, ypkg.{{.YTypeInstance}}{}, diffExclusions)
+ if err != nil {
+ fmt.Printf("Error: %s\n", err)
+ os.Exit(1)
+ }
+ fmt.Println(Report(xType, yType, diff))
+
+ if !diff.Empty() {
+ // signal that this "test" has failed
+ os.Exit(1)
+ }
+}
diff --git a/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go b/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go
new file mode 100644
index 000000000..3626e01ad
--- /dev/null
+++ b/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go
@@ -0,0 +1,580 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode"
+)
+
+// diffExclusions is a set of types to exclude from the comparison. The string keys are assumed to
+// be generated by the `TypeNode.String()` method; any type whose key appears here is skipped when
+// building up the diff. These exclusions represent types that are known to serialize in the same
+// way, but would be evaluated as different by the diff algorithm if not for this exclusion.
+var diffExclusions = map[string]bool{
+ // MicroAlgos is a struct with custom marshal override in go-algorand. In other repos it is a uint64.
+ `github.com/algorand/go-algorand/data/basics :: "basics.MicroAlgos" (struct)`: true,
+}
+
+// --------------- TYPE TREE DATA STRUCTURES --------------- //
+
+// TypeNode wraps reflect.Type and reflect.Kind to make it easier to
+// build a tree of types.
+type TypeNode struct {
+ Depth int
+ Type reflect.Type
+ Kind reflect.Kind
+ ChildNames []ChildName
+ children *ChildTypes
+}
+
+// String returns fully qualified information about the TypeNode.
+func (t *TypeNode) String() string {
+ return fmt.Sprintf("%s :: %q (%s)", t.Type.PkgPath(), t.Type, t.Kind)
+}
+
+// IsStruct returns true if the TypeNode is a struct.
+func (t *TypeNode) IsStruct() bool {
+ return t.Kind == reflect.Struct
+}
+
+// MakeType uses reflection to build a TypeNode from a concrete value.
+func MakeType(v interface{}) TypeNode {
+ t := reflect.TypeOf(v)
+ return TypeNode{Type: t, Kind: t.Kind()}
+}
+
+// ChildTypes represents a TypeNode's child Types, keyed by the stringified ChildName.
+type ChildTypes map[string]TypeNode
+
+// ChildName represents the name by which a type is referred to, together with
+// the Tag metadata (in the case of a struct field).
+type ChildName struct {
+ Name, Tag string
+}
+
+// String returns the ChildName rendered in an invertible manner.
+func (e *ChildName) String() string {
+ return fmt.Sprintf("[%s](%s)", e.Name, e.Tag)
+}
+
+// ChildNameFromLabel parses a ChildName that was rendered via ChildName.String()
+func ChildNameFromLabel(s string) (ChildName, error) {
+ re := regexp.MustCompile(`^\[(.*)\]\((.*)\)$`)
+ matches := re.FindStringSubmatch(s)
+ if len(matches) == 3 {
+ return ChildName{Name: matches[1], Tag: matches[2]}, nil
+ }
+ return ChildName{}, fmt.Errorf("invalid label: %s", s)
+}
+
+// Target represents a Type that is a child of another Type by
+// providing the ChildName that "points" to it.
+// In the case of a "root" TypeNode, it's up to the caller to provide
+// the ChildName.
+type Target struct {
+ ChildName
+ TypeNode TypeNode
+}
+
+// String is a convenience method for printing a Target.
+func (tgt *Target) String() string {
+ return fmt.Sprintf("%s|-->%s", &tgt.ChildName, &tgt.TypeNode)
+}
+
+// SerializationInfo provides the essential data
+// for go to serialize the field or other type.
+func (e *ChildName) SerializationInfo() string {
+ // Probably more subtlety is called for.
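+	// e.g. a tag of codec:"amt,omitempty" yields "amt"; an empty tag falls back
+	// to the field name, and a tag with no codec key is returned unchanged.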
+ re := regexp.MustCompile(`^codec:"([^,"]+)`)
+
+ if e.Tag == "" {
+ return e.Name
+ }
+
+ matches := re.FindStringSubmatch(e.Tag)
+ if len(matches) > 1 {
+ return matches[1]
+ }
+ return e.Tag
+}
+
+// TargetPair represents a pair of Targets.
+type TargetPair struct {
+ X, Y Target
+}
+
+// Diff represents a difference (if any) between two Types.
+// CommonPath is the path in the trees of the types from their roots
+// that arrives at the difference.
+// Xdiff and Ydiff are the differences at the end of the common path
+// between children. Set theoretically:
+// * Xdiff = X - Y
+// * Ydiff = Y - X.
+type Diff struct {
+ CommonPath []TargetPair
+ Xdiff, Ydiff []Target
+}
+
+// Empty reports whether there was any difference at all.
+func (d *Diff) Empty() bool {
+ return d == nil || (len(d.Xdiff) == 0 && len(d.Ydiff) == 0)
+}
+
+// --------------- BUILD THE TYPE TREE --------------- //
+
+// Targets returns a slice of Targets, one for each child of the TypeNode.
+// In the case of structs, the order is the same as the order of the fields in the struct.
+// In the case of maps, the key Target precedes the value Target.
+func (t *TypeNode) Targets() []Target {
+ targets := make([]Target, 0, len(t.ChildNames))
+ for _, edge := range t.ChildNames {
+ targets = append(targets, Target{edge, (*t.children)[edge.String()]})
+ }
+ return targets
+}
+
+// TypePath encapsulates a path in a TypeNode tree.
+type TypePath []TypeNode
+
+// String is a convenience method for printing a TypePath
+// in a go-literal-friendly format.
+func (t TypePath) String() string {
+ parts := make([]string, len(t))
+ for i, node := range t {
+ parts[i] = fmt.Sprintf("%q", node.String())
+ }
+ return fmt.Sprintf("[]string{%s}", strings.Join(parts, ", "))
+}
+
+// IsLeaf returns true if the TypeNode has no children.
+func (t *TypeNode) IsLeaf() bool {
+ return t.children == nil || len(*t.children) == 0
+}
+
+// Build constructs the TypeNode tree by finding all child types of compound kinds and
+// recursively building their TypeNode trees. Traversal stops when a cycle is detected.
+func (t *TypeNode) Build() TypePath {
+ return t.build(TypePath{})
+}
+
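+// appendChild registers a child TypeNode under the given name and tag,
+// lazily allocating the children map on first use.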
+func (t *TypeNode) appendChild(typeName, typeTag string, child TypeNode) {
+ cname := ChildName{typeName, typeTag}
+ t.ChildNames = append(t.ChildNames, cname)
+ if t.children == nil {
+ children := make(ChildTypes)
+ t.children = &children
+ }
+ (*t.children)[cname.String()] = child
+}
+
+// build constructs the TypeNode tree by finding all child types of compound kinds and
+// recursively building their TypeNode trees.
+// It returns a path that is non-empty only when a cycle is detected.
+func (t *TypeNode) build(path TypePath) TypePath {
+ if t.IsStruct() {
+ me := t.String()
+ foundCycle := false
+ for _, node := range path {
+ if node.String() == me {
+ foundCycle = true
+ break
+ }
+ }
+ path = append(path, *t)
+ if foundCycle {
+ return path
+ }
+ }
+
+ var cyclePath TypePath
+ switch t.Kind {
+ case reflect.Struct:
+ cyclePath = t.buildStructChildren(path)
+ case reflect.Slice, reflect.Array:
+ cyclePath = t.buildListChild(path)
+ case reflect.Map:
+ cyclePath = t.buildMapChildren(path)
+ case reflect.Ptr:
+ cyclePath = t.buildPtrChild(path)
+ }
+
+ return cyclePath
+}
+
+// buildStructChildren builds the children of a struct type.
+func (t *TypeNode) buildStructChildren(path TypePath) TypePath {
+ var cyclePath TypePath
+ for i := 0; i < t.Type.NumField(); i++ {
+ typeField := t.Type.Field(i)
+ typeName := typeField.Name
+
+ // probably we need to skip typeField.Tag == `codec:"-"` as well
+ if typeName == "" || (!unicode.IsUpper(rune(typeName[0])) && typeName != "_struct") {
+ continue
+ }
+
+ if typeField.Anonymous {
+ // embedded struct case
+ actualKind := typeField.Type.Kind()
+ if actualKind != reflect.Struct {
+ panic(fmt.Sprintf("expected [%s] but got unexpected embedded type: %s", reflect.Struct, typeField.Type))
+ }
+
+ embedded := TypeNode{t.Depth, typeField.Type, reflect.Struct, nil, nil}
+ embeddedCyclePath := embedded.build(path)
+ if len(embeddedCyclePath) > 0 {
+ cyclePath = embeddedCyclePath
+ }
+ for _, edge := range embedded.ChildNames {
+ child := (*embedded.children)[edge.String()]
+ t.appendChild(edge.Name, edge.Tag, child)
+ }
+ continue
+ }
+
+ typeTag := string(typeField.Tag)
+ child := TypeNode{t.Depth + 1, typeField.Type, typeField.Type.Kind(), nil, nil}
+ childCyclePath := child.build(path)
+ if len(childCyclePath) > 0 {
+ cyclePath = childCyclePath
+ }
+ t.appendChild(typeName, typeTag, child)
+ }
+ return cyclePath
+}
+
+func (t *TypeNode) buildListChild(path TypePath) TypePath {
+ tt := t.Type.Elem()
+ child := TypeNode{t.Depth + 1, tt, tt.Kind(), nil, nil}
+ path = child.build(path)
+ t.appendChild("<list elt>", "", child)
+ return path
+}
+
+// buildMapChildren builds the children of a map type.
+// To distinguish between the key and value children as well as children of lists and structs,
+// the key child is given the name "<map key>" while the value child is given the name "<map val>".
+func (t *TypeNode) buildMapChildren(path TypePath) TypePath {
+ keyType, valueType := t.Type.Key(), t.Type.Elem()
+
+ keyChild := TypeNode{t.Depth + 1, keyType, keyType.Kind(), nil, nil}
+ // don't worry about path because struct keys must be hashable:
+ keyChild.build(path)
+ t.appendChild("<map key>", "", keyChild)
+
+ valChild := TypeNode{t.Depth + 1, valueType, valueType.Kind(), nil, nil}
+ path = valChild.build(path)
+ t.appendChild("<map val>", "", valChild)
+ return path
+}
+
+// buildPtrChild builds the child of a pointer type. To distinguish between a child
+// that is a pointer and other children, the child is given the name "<pointer>".
+func (t *TypeNode) buildPtrChild(path TypePath) TypePath {
+ tt := t.Type.Elem()
+ child := TypeNode{t.Depth + 1, tt, tt.Kind(), nil, nil}
+ path = child.build(path)
+ t.appendChild("<pointer>", "", child)
+ return path
+}
+
+// Visit traverses the Target tree and applies any actions provided at each node.
+func (tgt *Target) Visit(actions ...func(Target)) {
+ if len(actions) > 0 {
+ for _, action := range actions {
+ action(*tgt)
+ }
+ for _, target := range tgt.TypeNode.Targets() {
+ target.Visit(actions...)
+ }
+ }
+}
+
+// StructDiff compares two structs by building their type tree and then
+// calling targetTreeDiff on the trees.
+func StructDiff(x, y interface{}, exclusions map[string]bool) (TypeNode, TypeNode, *Diff, error) {
+ xRoot, yRoot := MakeType(x), MakeType(y)
+ xRoot.Build()
+ yRoot.Build()
+
+ diff, err := targetTreeDiff(Target{TypeNode: xRoot}, Target{TypeNode: yRoot}, exclusions)
+ return xRoot, yRoot, diff, err
+}
+
+// targetTreeDiff recursively computes a diff between two Targets x and y, considering only data
+// that impacts serialization and ignoring field ordering.
+func targetTreeDiff(x, y Target, exclusions map[string]bool) (*Diff, error) {
+ xtype, ytype := x.TypeNode, y.TypeNode
+ if xtype.Depth != ytype.Depth {
+ return nil, fmt.Errorf("cannot compare types at different depth")
+ }
+ // if we got here it must be the case that either depth == 0 or
+ // the edges of x and y serialize the same way.
+
+ // First check that the native type for x and y are the same.
+ if xtype.Kind != ytype.Kind {
+ return &Diff{
+ Xdiff: []Target{x},
+ Ydiff: []Target{y},
+ }, nil
+ }
+
+ // So look at the children.
+ // If any children differ report back the diff.
+ xTgts, yTgts := xtype.Targets(), ytype.Targets()
+ xSerials, ySerials := make(map[string]Target), make(map[string]Target)
+ for _, tgt := range xTgts {
+ xSerials[tgt.ChildName.SerializationInfo()] = tgt
+ }
+ for _, tgt := range yTgts {
+ ySerials[tgt.ChildName.SerializationInfo()] = tgt
+ }
+ xDiff, yDiff := []Target{}, []Target{}
+ for k, v := range xSerials {
+ if _, ok := ySerials[k]; !ok {
+ xDiff = append(xDiff, v)
+ }
+ }
+ for k, v := range ySerials {
+ if _, ok := xSerials[k]; !ok {
+ yDiff = append(yDiff, v)
+ }
+ }
+ if len(xDiff) != 0 || len(yDiff) != 0 {
+ return &Diff{
+ Xdiff: xDiff,
+ Ydiff: yDiff,
+ }, nil
+ }
+
+ // Otherwise, call the children recursively. If any of them report
+ // a diff, modify the diff's CommonPath to include the current edge and return it.
+ for k, xChild := range xSerials {
+ if exclusions[xChild.TypeNode.String()] {
+ continue
+ }
+ yChild := ySerials[k]
+ diff, err := targetTreeDiff(xChild, yChild, exclusions)
+ if err != nil {
+			return nil, err
+		}
+ if diff != nil {
+ diff.CommonPath = append([]TargetPair{
+ {
+ X: xChild,
+ Y: yChild,
+ },
+ }, diff.CommonPath...)
+ return diff, nil
+ }
+ }
+ // No diffs detected up the tree:
+ return nil, nil
+}
+
+// --------------- DIFF REPORT ----------------- //
+
+// Report returns a human-readable listing of the differences as a string.
+func Report(x, y TypeNode, d *Diff) string {
+ var sb strings.Builder
+
+ sb.WriteString(`
+========================================================
+ STRUCT DIFF REPORT
+comparing
+ <<<<<`)
+ sb.WriteString(x.String())
+ sb.WriteString(`>>>>>
+VS
+ <<<<<`)
+ sb.WriteString(y.String())
+ sb.WriteString(`>>>>>
+========================================================`)
+
+ if d == nil {
+ sb.WriteString("\nNo differences found.")
+ } else {
+ if len(d.Xdiff) == 0 && len(d.Ydiff) == 0 {
+ if len(d.CommonPath) != 0 {
+				panic("A common path was found with no diffs. This should NEVER happen.")
+ }
+ sb.WriteString("\nNo differences found.")
+ } else {
+ sb.WriteString(`
+--------------------------------------------------------
+ DIFFERENCES FOUND (partial)
+--------------------------------------------------------`)
+ sb.WriteString(fmt.Sprintf("\nCommon path of length %d:\n", len(d.CommonPath)))
+ for depth, tgts := range d.CommonPath {
+ indent := strings.Repeat(" ", depth)
+ sb.WriteString(fmt.Sprintf("%s_____LEVEL %d_____\n", indent, depth+1))
+ sb.WriteString(fmt.Sprintf("%sX-FIELD: %s\n%s\tX-TYPE: %s\n", indent, &tgts.X.ChildName, indent, &tgts.X.TypeNode))
+ sb.WriteString(fmt.Sprintf("%sY-FIELD: %s\n%s\tY-TYPE: %s\n", indent, &tgts.Y.ChildName, indent, &tgts.Y.TypeNode))
+ }
+ sb.WriteString(`
+X-DIFF
+------
+EXISTS IN: `)
+ sb.WriteString(fmt.Sprintf("%q", &x))
+ sb.WriteString(`
+MISSING FROM: `)
+ sb.WriteString(fmt.Sprintf("%q", &y))
+ sb.WriteString(fmt.Sprintf("\n%d TYPES TOTAL:\n", len(d.Xdiff)))
+ for i, tgt := range d.Xdiff {
+ sb.WriteString(fmt.Sprintf(`
+(%d)
+[FIELD](+codec): %s
+SOURCE: %s`, i+1, &tgt.ChildName, &tgt.TypeNode))
+ }
+
+ sb.WriteString(`
+
+
+
+Y-DIFF
+------
+EXISTS IN: `)
+ sb.WriteString(fmt.Sprintf("%q", &y))
+ sb.WriteString(`
+MISSING FROM: `)
+ sb.WriteString(fmt.Sprintf("%q", &x))
+ sb.WriteString(fmt.Sprintf("\n%d TYPES TOTAL:\n", len(d.Ydiff)))
+ for i, tgt := range d.Ydiff {
+ sb.WriteString(fmt.Sprintf(`(%d)
+[FIELD](+codec): %s
+SOURCE: %s
+`, i+1, &tgt.ChildName, &tgt.TypeNode))
+ }
+ }
+ }
+ sb.WriteString(`
+========================================================
+=============== STRUCT DIFF REPORT END ===============
+========================================================`)
+
+ return sb.String()
+}
+
+// ------- STATISTICS AND DEBUGGING ------------- //
+
+// Print prints out information about the TypeNode's structure using `Visit()`.
+func (t *TypeNode) Print() {
+ action := func(tgt Target) {
+ tabs := strings.Repeat("\t", tgt.TypeNode.Depth)
+ fmt.Printf("%s[depth=%d]. Value is type %q (%s)\n", tabs, tgt.TypeNode.Depth, tgt.TypeNode.Type, tgt.TypeNode.Kind)
+
+ if tgt.TypeNode.IsLeaf() {
+ x := fmt.Sprintf("%q", tgt.TypeNode.Type)
+ _ = x
+ fmt.Printf("%s-------B I N G O: A LEAF---------->%q (%s)\n", tabs, tgt.TypeNode.Type, tgt.TypeNode.Kind)
+ return
+ }
+ fmt.Printf("%s=====EDGE: %s=====>\n", tabs, tgt.ChildName)
+ }
+ (&Target{ChildName{}, *t}).Visit(action)
+}
+
+// PrintSerializable prints the information that determines go-codec serialization.
+// cf: https://github.com/algorand/go-codec/blob/master/codec/encode.go#L1416-L1436
+func (tgt Target) PrintSerializable() {
+ action := func(tgt Target) {
+ ttype := tgt.TypeNode
+ tkind := ttype.Kind
+ depth := ttype.Depth
+ edge := tgt.ChildName
+ if depth == 0 {
+ fmt.Printf("Serialization info for type %q (%s):\n", ttype.Type, tkind)
+ return
+ }
+ fmt.Printf("%s%s", strings.Repeat(" ", depth-1), edge.SerializationInfo())
+ suffix := ""
+ if ttype.IsLeaf() {
+ x := ttype.String()
+ _ = x
+ suffix = fmt.Sprintf(":%s", tkind)
+ }
+ fmt.Printf("%s\n", suffix)
+ }
+ tgt.Visit(action)
+}
+
+// LeafStatsReport prints out a report of the leaf-type counts.
+func LeafStatsReport(xTgt Target) {
+ fmt.Printf("\n\nLeaf-type stats for type %s:\n\n", &xTgt.TypeNode)
+ leaves := []TypeNode{}
+ leafCollector := func(tgt Target) {
+ if tgt.TypeNode.IsLeaf() {
+ leaves = append(leaves, tgt.TypeNode)
+ }
+ }
+
+ xTgt.Visit(leafCollector)
+ fmt.Printf("Found %d leaves\n\n", len(leaves))
+
+ stats := make(map[string]int)
+ for _, leaf := range leaves {
+ key := fmt.Sprintf("%s/%s", leaf.Type, leaf.Kind)
+		stats[key]++
+ }
+ printSortedStats(stats)
+}
+
+// MaxDepthReport prints out a report for the max depth of the type tree.
+func MaxDepthReport(xTgt Target) int {
+ fmt.Printf("\n\nMax depth stats for type %s:\n\n", &xTgt.TypeNode)
+ maxDepth := 0
+ maxDepthCollector := func(tgt Target) {
+ if tgt.TypeNode.Depth > maxDepth {
+ maxDepth = tgt.TypeNode.Depth
+ }
+ }
+ xTgt.Visit(maxDepthCollector)
+ fmt.Printf("Max depth is %d\n", maxDepth)
+ return maxDepth
+}
+
+type keyValue struct {
+ Key string
+ Value int
+}
+
+func printSortedStats(stats map[string]int) {
+ // Create a slice of key-value pairs
+ var kvSlice []keyValue
+ for k, v := range stats {
+ kvSlice = append(kvSlice, keyValue{k, v})
+ }
+
+ // Sort the slice by the count in descending order
+ sort.Slice(kvSlice, func(i, j int) bool {
+ return kvSlice[i].Value > kvSlice[j].Value
+ })
+
+ // Print the sorted slice
+ for _, kv := range kvSlice {
+ fmt.Printf("%s: %d\n", kv.Key, kv.Value)
+ }
+}
diff --git a/tools/x-repo-types/typeAnalyzer/typeAnalyzer_test.go b/tools/x-repo-types/typeAnalyzer/typeAnalyzer_test.go
new file mode 100644
index 000000000..632a38b52
--- /dev/null
+++ b/tools/x-repo-types/typeAnalyzer/typeAnalyzer_test.go
@@ -0,0 +1,449 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main // cannot use main_type for main package?
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEdgeFromLabel(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ testCases := []struct {
+ label string
+ expected ChildName
+ expectError bool
+ }{
+ {
+ label: "[foo](bar)",
+ expected: ChildName{Name: "foo", Tag: "bar"},
+ expectError: false,
+ },
+ {
+ label: "[foo]()",
+ expected: ChildName{Name: "foo", Tag: ""},
+ expectError: false,
+ },
+ {
+ label: "[](bar)",
+ expected: ChildName{Name: "", Tag: "bar"},
+ expectError: false,
+ },
+ {
+ label: "[]()",
+ expected: ChildName{Name: "", Tag: ""},
+ expectError: false,
+ },
+ {
+ label: "[f[]()oo](()(()",
+ expected: ChildName{Name: "f[]()oo", Tag: "()(("},
+ expectError: false,
+ },
+ {
+ label: "foo:bar",
+ expected: ChildName{},
+ expectError: true,
+ },
+ {
+ label: "[f[]()oo](()((",
+ expected: ChildName{},
+ expectError: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc // capture range variable
+ t.Run(tc.label, func(t *testing.T) {
+ t.Parallel()
+ edge, err := ChildNameFromLabel(tc.label)
+ if tc.expectError {
+ require.Error(t, err)
+ require.Equal(t, ChildName{}, edge)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tc.expected, edge)
+ require.Equal(t, tc.label, edge.String())
+ }
+ })
+ }
+}
+
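+// Test fixture types: Node is directly self-recursive; Senior -> Parent -> Child -> Senior
+// forms a three-type cycle; Family repeats Kid twice without creating any cycle.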
+type Node struct {
+ Name string
+ Next *Node
+}
+
+type Senior struct {
+ Children []Parent
+}
+
+type Parent struct {
+ Granddaughter *Child
+}
+
+type Child struct {
+ Grandpa Senior
+}
+
+type Family struct {
+ Brother, Sister Kid
+}
+
+type Kid struct {
+ Age int
+ Name string
+}
+
+func TestBuild(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ testcases := []struct {
+ name string
+ x interface{}
+ depth int
+ }{
+ {
+ name: "recursive 0",
+ x: Node{},
+ depth: 2,
+ },
+ {
+ name: "recursive 1",
+ x: Senior{},
+ depth: 5,
+ },
+ {
+ name: "recursive 2",
+ x: Child{},
+ depth: 5,
+ },
+ {
+ name: "basic struct",
+ x: struct{ A int }{},
+ depth: 1,
+ },
+ {
+ name: "basic codec",
+ x: struct {
+ B int `codec:"A"`
+ }{},
+ depth: 1,
+ },
+ {
+ name: "deeper unexported",
+ x: struct {
+ a []string
+ B string
+ }{},
+ depth: 1,
+ },
+ {
+ name: "deeper exported",
+ x: struct {
+ A []string
+ b int
+ }{},
+ depth: 2,
+ },
+ {
+ name: "embed flattened",
+ x: func() interface{} {
+ type Embedded struct{ A int }
+ return struct{ Embedded }{}
+ }(),
+ depth: 1,
+ },
+ {
+ name: "primitive alias",
+ x: func() interface{} {
+ type MYINT int
+ var i MYINT
+ return i
+ }(),
+ depth: 0,
+ },
+ {
+ name: "primitive type",
+ x: 5,
+ depth: 0,
+ },
+ {
+ name: "nested embeds 1",
+ x: func() interface{} {
+ type Embedded struct{ A int }
+ type Embedded2 struct{ Embedded }
+ return struct{ Embedded2 }{}
+ }(),
+ depth: 1,
+ },
+ }
+
+ for _, tc := range testcases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ xRoot := MakeType(tc.x)
+ xRoot.Build()
+ tgt := Target{TypeNode: xRoot}
+ tgt.PrintSerializable()
+ require.Equal(t, tc.depth, MaxDepthReport(tgt), "test case: %s", tc.name)
+ })
+ }
+}
+
+func TestDiffErrors(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ testcases := []struct {
+ name string
+ x interface{}
+ y interface{}
+ equal bool
+ }{
+ {
+ name: "basic equal",
+ x: struct{ A int }{},
+ y: struct{ A int }{},
+ equal: true,
+ },
+ {
+ name: "basic codec equal",
+ x: struct{ A int }{},
+ y: struct {
+ B int `codec:"A"`
+ }{},
+ equal: true,
+ },
+ {
+ name: "equal because only care about exported",
+ x: struct {
+ a int
+ B string
+ }{},
+ y: struct{ c, B string }{},
+ equal: true,
+ },
+ {
+ name: "basic codec not equal",
+ x: struct{ A int }{},
+ y: struct {
+ A int `codec:"B"`
+ }{},
+ equal: false,
+ },
+ {
+ name: "basic field not equal",
+ x: struct{ A int }{},
+ y: struct{ B int }{},
+ equal: false,
+ },
+ {
+ name: "embed flattened",
+ x: struct{ A int }{},
+ y: func() interface{} {
+ type Embedded struct{ A int }
+ return struct{ Embedded }{}
+ }(),
+ equal: true,
+ },
+ {
+ name: "embed flattened not equal",
+ x: struct{ A int }{},
+ y: func() interface{} {
+ type Embedded struct{ B int }
+ return struct{ Embedded }{}
+ }(),
+ equal: false,
+ },
+ {
+ name: "primitive types equal",
+ x: func() interface{} {
+ type MYINT int
+ var i MYINT
+ return i
+ }(),
+ y: func() interface{} {
+ type MYOTHERINT int
+ var i MYOTHERINT
+ return i
+ }(),
+ equal: true,
+ },
+ {
+ name: "primitive type and primitive equal",
+ x: func() interface{} {
+ type MYINT int
+ var i MYINT
+ return i
+ }(),
+ y: 5,
+ equal: true,
+ },
+ {
+ name: "primitives not equal",
+ x: func() interface{} {
+ type MYINT int
+ var i MYINT
+ return i
+ }(),
+ y: uint(5),
+ equal: false,
+ },
+ {
+ name: "nested embeds 2",
+ x: func() interface{} {
+ type Embedded struct{ A int }
+ type Embedded2 struct{ Embedded }
+ return struct{ Embedded2 }{}
+ }(),
+ y: struct{ A int }{},
+ equal: true,
+ },
+ {
+ name: "field order",
+ x: struct{ A, B int }{},
+ y: struct{ B, A int }{},
+ equal: true,
+ },
+ }
+
+ for _, tc := range testcases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ xRoot, yRoot, diff, err := StructDiff(tc.x, tc.y, nil)
+ require.NoError(t, err)
+ require.Equal(t, tc.equal, diff.Empty(), "test case: %s, report: %s", tc.name, Report(xRoot, yRoot, diff))
+ })
+ }
+}
+
+func TestBuildWithCyclicCheck(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ testcases := []struct {
+ name string
+ x interface{}
+ path []string
+ }{
+ {
+ name: "recursive 0",
+ x: Node{},
+ path: []string{
+ "github.com/algorand/go-algorand/tools/x-repo-types/typeAnalyzer :: \"main.Node\" (struct)",
+ "github.com/algorand/go-algorand/tools/x-repo-types/typeAnalyzer :: \"main.Node\" (struct)",
+ },
+ },
+ {
+ name: "recursive 1",
+ x: Senior{},
+ path: []string{
+ "github.com/algorand/go-algorand/tools/x-repo-types/typeAnalyzer :: \"main.Senior\" (struct)",
+ "github.com/algorand/go-algorand/tools/x-repo-types/typeAnalyzer :: \"main.Parent\" (struct)",
+ "github.com/algorand/go-algorand/tools/x-repo-types/typeAnalyzer :: \"main.Child\" (struct)",
+ "github.com/algorand/go-algorand/tools/x-repo-types/typeAnalyzer :: \"main.Senior\" (struct)",
+ },
+ },
+ {
+ name: "recursive 2",
+ x: Child{},
+ path: []string{
+ "github.com/algorand/go-algorand/tools/x-repo-types/typeAnalyzer :: \"main.Child\" (struct)",
+ "github.com/algorand/go-algorand/tools/x-repo-types/typeAnalyzer :: \"main.Senior\" (struct)",
+ "github.com/algorand/go-algorand/tools/x-repo-types/typeAnalyzer :: \"main.Parent\" (struct)",
+ "github.com/algorand/go-algorand/tools/x-repo-types/typeAnalyzer :: \"main.Child\" (struct)",
+ },
+ },
+ {
+ name: "basic struct",
+ x: struct{ A int }{},
+ path: []string{},
+ },
+ {
+ name: "basic codec",
+ x: struct {
+ B int `codec:"A"`
+ }{},
+ path: []string{},
+ },
+ {
+ name: "deeper unexported",
+ x: struct {
+ a []string
+ B string
+ }{},
+ path: []string{},
+ },
+ {
+ name: "deeper exported",
+ x: struct {
+ A []string
+ b int
+ }{},
+ path: []string{},
+ },
+ {
+ name: "embed flattened",
+ x: func() interface{} {
+ type Embedded struct{ A int }
+ return struct{ Embedded }{}
+ }(),
+ path: []string{},
+ },
+ {
+ name: "primitive alias",
+ x: func() interface{} {
+ type MYINT int
+ var i MYINT
+ return i
+ }(),
+ path: []string{},
+ },
+ {
+ name: "primitive type",
+ x: 5,
+ path: []string{},
+ },
+ {
+ name: "types may reappear with no cycles",
+ x: Family{},
+ path: []string{},
+ },
+ }
+
+ for _, tc := range testcases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ xRoot := MakeType(tc.x)
+ cycle := xRoot.Build()
+ hasCycle := len(cycle) > 0
+ expectedHasCycle := len(tc.path) > 0
+ require.Equal(t, expectedHasCycle, hasCycle, `test case: %s
+ cycle: %s
+ expected: %#v`, tc.name, cycle, tc.path)
+ require.Equal(t, fmt.Sprintf("%#v", tc.path), cycle.String(), `test case: %s
+ cycle: %s
+ expected: %#v`, tc.name, cycle, tc.path,
+ )
+ })
+ }
+}
diff --git a/tools/x-repo-types/xrt.go b/tools/x-repo-types/xrt.go
new file mode 100644
index 000000000..016f50ff3
--- /dev/null
+++ b/tools/x-repo-types/xrt.go
@@ -0,0 +1,333 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"text/template"
+
+	"github.com/spf13/cobra"
+
+	_ "embed"
+)
+
+//go:embed typeAnalyzer/main.tmpl
+var differTmpl string
+
+//go:embed typeAnalyzer/typeAnalyzer.go
+var typeAnalyzerGo string
+
+func main() {
+ var xPkg, xBranch, xType, yPkg, yBranch, yType, artifactPath string
+
+ rootCmd := &cobra.Command{
+ Use: "x-repo-types",
+ Short: "Compare types across repos",
+ Run: func(cmd *cobra.Command, args []string) {
+ if err := runApp(xPkg, xBranch, xType, yPkg, yBranch, yType, artifactPath); err != nil {
+ log.Fatal(err)
+ }
+ },
+ }
+
+ rootCmd.Flags().StringVar(&xPkg, "x-package", "", "Go repo and package for type x")
+ rootCmd.Flags().StringVar(&xBranch, "x-branch", "", "repository branch for type x")
+ rootCmd.Flags().StringVar(&xType, "x-type", "", "Exported type in the package for type x")
+	rootCmd.Flags().StringVar(&yPkg, "y-package", "", "Go repo and package for type y")
+	rootCmd.Flags().StringVar(&yBranch, "y-branch", "", "repository branch for type y")
+	rootCmd.Flags().StringVar(&yType, "y-type", "", "Exported type in the package for type y")
+	rootCmd.Flags().StringVar(&artifactPath, "artifact-path", "", "Path to write the generated comparison code that runs after the types are downloaded. If not provided, a temporary folder will be created.")
+
+ if err := rootCmd.Execute(); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func runApp(xPkg, xBranch, xType, yPkg, yBranch, yType, artifactPath string) (err error) {
+	fileBackups, err := setUp()
+	if err != nil {
+		return err
+	}
+	fmt.Printf("fileBackups: %#v\n\n", fileBackups)
+ defer func() {
+ fmt.Printf("tearDown to restore: %#v\n\n", fileBackups)
+ teardownErr := tearDown(fileBackups)
+		if teardownErr != nil {
+			fmt.Printf("problem during tearDown: %v\n", teardownErr)
+			if err == nil {
+				// don't let a teardown problem mask an earlier error from the run itself
+				err = teardownErr
+			}
+		}
+ }()
+
+	if xPkg == "" || xType == "" {
+		return fmt.Errorf("the x-package (%q) and x-type (%q) flags are required", xPkg, xType)
+	}
+	if yPkg == "" || yType == "" {
+		return fmt.Errorf("the y-package (%q) and y-type (%q) flags are required", yPkg, yType)
+	}
+
+ xPkgBranch := xPkg
+ if xBranch != "" {
+ xPkgBranch += "@" + xBranch
+ }
+ yPkgBranch := yPkg
+ if yBranch != "" {
+ yPkgBranch += "@" + yBranch
+ }
+
+ err = goGet(xPkgBranch)
+ if err != nil {
+ return err
+ }
+ err = goGet(yPkgBranch)
+ if err != nil {
+ return err
+ }
+
+ xParts := strings.Split(xPkg, "/")
+ yParts := strings.Split(yPkg, "/")
+
+ xRepo := strings.Join(xParts[:3], "/")
+ yRepo := strings.Join(yParts[:3], "/")
+
+ xPkgSuffix := strings.Join(xParts[3:], "/")
+ yPkgSuffix := strings.Join(yParts[3:], "/")
+
+ // Instantiate the type in a separate process as a "Smoke Test"
+ err = instantiate(xRepo, xPkgSuffix, xType)
+ if err != nil {
+ return err
+ }
+ err = instantiate(yRepo, yPkgSuffix, yType)
+ if err != nil {
+ return err
+ }
+
+ // Compare the types by running the template typeAnalyzer/main.tmpl in a separate process
+ // typeAnalyzer/main will return an error if the types are not the same
+ // here we propagate the error to the caller, so as to fail the test.
+ err = serializationDiff(artifactPath, xRepo, xPkgSuffix, xType, yRepo, yPkgSuffix, yType)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
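+// setUp backs up the enclosing module's go.mod and go.sum so that the changes
+// made by `go get` can be undone later by tearDown.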
+func setUp() (map[string]string, error) {
+ pkgRoot, err := findPkgRoot()
+ if err != nil {
+ return nil, err
+ }
+ if pkgRoot == "" {
+ fmt.Print("No package root found. Will not attempt to backup go.mod and go.sum files.\n\n")
+ return nil, nil
+ }
+
+ fmt.Printf("Will look for and backup go.mod and go.sum files in pkgRoot: %s\n\n", pkgRoot)
+
+ goModPath := filepath.Join(pkgRoot, "go.mod")
+ goSumPath := filepath.Join(pkgRoot, "go.sum")
+
+ backups := make(map[string]string)
+ for _, path := range []string{goModPath, goSumPath} {
+ backup, err := backupFile(path)
+ if err != nil {
+ return nil, err
+ }
+ backups[backup] = path
+ }
+ return backups, nil
+}
+
+func tearDown(fileBackups map[string]string) error {
+ for backup, path := range fileBackups {
+ err := restoreFile(backup, path)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
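+// backupFile copies src into a fresh temp file and returns the temp file's path.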
+func backupFile(src string) (string, error) {
+	content, err := os.ReadFile(src)
+ if err != nil {
+ return "", err
+ }
+
+	tmpFile, err := os.CreateTemp("", "backup-*")
+ if err != nil {
+ return "", err
+ }
+
+	err = os.WriteFile(tmpFile.Name(), content, 0644)
+ if err != nil {
+ return "", err
+ }
+
+ return tmpFile.Name(), nil
+}
+
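+// findPkgRoot returns the root directory of the enclosing Go module, as reported by `go list -m`.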
+func findPkgRoot() (string, error) {
+ cmd := exec.Command("go", "list", "-m", "-f", "{{.Dir}}")
+ var stdout, stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ err := cmd.Run()
+ if err != nil {
+ return "", errors.New(stderr.String())
+ }
+
+ return strings.TrimSpace(stdout.String()), nil
+}
+
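+// restoreFile copies the backup at src over dst, preserving dst's mode, and removes src.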
+func restoreFile(src, dst string) error {
+ // assuming that dst already exists
+ dstFileInfo, err := os.Stat(dst)
+ if err != nil {
+ return err
+ }
+
+	content, err := os.ReadFile(src)
+ if err != nil {
+ return err
+ }
+
+	err = os.WriteFile(dst, content, dstFileInfo.Mode())
+ if err != nil {
+ return err
+ }
+
+ err = os.Remove(src)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func goGet(repo string) error {
+ fmt.Println("Downloading repo:", repo)
+ cmd := exec.Command("go", "get", repo)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ return cmd.Run()
+}
+
+func instantiate(repo, pkgPath, typeName string) error {
+ fmt.Println("Instantiating type for:", typeName)
+
+ pkgParts := strings.Split(pkgPath, "/")
+ pkgOnly := pkgParts[len(pkgParts)-1]
+
+ code := fmt.Sprintf(`package main
+
+import (
+ "fmt"
+ "%s/%s"
+)
+
+func main() {
+ var item %s.%s
+ fmt.Printf("Instantiated: %%#v\n\n", item)
+}
+`, repo, pkgPath, pkgOnly, typeName)
+
+ tmpDir, err := os.MkdirTemp(".", "instantiate-*")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpDir)
+
+ tmpFile := filepath.Join(tmpDir, "main.go")
+ err = os.WriteFile(tmpFile, []byte(code), 0644)
+ if err != nil {
+ return err
+ }
+
+ //nolint:gosec // tmpFile is defined above so no security concerns here
+ cmd := exec.Command("go", "run", tmpFile)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ return cmd.Run()
+}
+
+// serializationDiff runs the typeAnalyzer/main.tmpl template in a separate process.
+// If you want to persist the generated artifacts, pass in a non-empty artifactPath.
+func serializationDiff(artifactPath, xRepo, xPkgPath, xType, yRepo, yPkgPath, yType string) error {
+ fmt.Printf("Diffing %s from package %s VS %s from package %s...\n", xType, xPkgPath, yType, yPkgPath)
+
+ tmpl, err := template.New("code").Parse(differTmpl)
+	if err != nil {
+		// return instead of os.Exit so that runApp's deferred tearDown still restores go.mod/go.sum
+		return fmt.Errorf("error parsing template: %w", err)
+	}
+
+ var buf bytes.Buffer
+ err = tmpl.Execute(&buf, map[string]string{
+ "XModulePath": xRepo,
+ "XPackagePath": xPkgPath,
+ "XTypeInstance": xType,
+ "YModulePath": yRepo,
+ "YPackagePath": yPkgPath,
+ "YTypeInstance": yType,
+ })
+	if err != nil {
+		return fmt.Errorf("error executing template: %w", err)
+	}
+
+ var main, typeAnalyzer string
+ if artifactPath == "" {
+ ap, err := os.MkdirTemp("", "typeAnalyzer")
+		if err != nil {
+			return fmt.Errorf("error creating typeAnalyzer temp directory: %w", err)
+		}
+ artifactPath = ap
+ defer os.RemoveAll(artifactPath)
+ }
+
+ main = filepath.Join(artifactPath, "main.go")
+ typeAnalyzer = filepath.Join(artifactPath, "typeAnalyzer.go")
+
+ err = os.WriteFile(main, buf.Bytes(), 0644)
+ if err != nil {
+ return err
+ }
+
+ err = os.WriteFile(typeAnalyzer, []byte(typeAnalyzerGo), 0644)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Saved x-repo-types code to directory: [%s]\n", artifactPath)
+
+ //nolint:gosec // main and typeAnalyzer are hard-coded above so no security concerns here
+ cmd := exec.Command("go", "run", main, typeAnalyzer)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ return cmd.Run()
+}
diff --git a/tools/x-repo-types/xrt_test.go b/tools/x-repo-types/xrt_test.go
new file mode 100644
index 000000000..119954e63
--- /dev/null
+++ b/tools/x-repo-types/xrt_test.go
@@ -0,0 +1,103 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+type testCase struct {
+ name string
+ xPkg, xBranch, xType, yPkg, yBranch, yType string
+ skip bool
+ skipReason string
+}
+
+func TestCrossRepoTypes(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ testCases := []testCase{
+ {
+ name: "SDK: StateDelta",
+ xPkg: "github.com/algorand/go-algorand/ledger/ledgercore",
+ xBranch: "",
+ xType: "StateDelta",
+ yPkg: "github.com/algorand/go-algorand-sdk/v2/types",
+ yBranch: "develop",
+ yType: "LedgerStateDelta",
+ },
+ {
+ name: "goal-v-sdk-genesis",
+ xPkg: "github.com/algorand/go-algorand/data/bookkeeping",
+ xType: "Genesis",
+ yPkg: "github.com/algorand/go-algorand-sdk/v2/types",
+ yBranch: "develop",
+ yType: "Genesis",
+ skip: true,
+ skipReason: `LEVEL 3 of goal basics.AccountData has 12 fields missing from SDK types.Account`,
+ },
+ {
+ name: "goal-v-sdk-block",
+ xPkg: "github.com/algorand/go-algorand/data/bookkeeping",
+ xType: "Block",
+ yPkg: "github.com/algorand/go-algorand-sdk/v2/types",
+ yBranch: "develop",
+ yType: "Block",
+ skip: true,
+ skipReason: `Several issues. For example: LEVEL 5 of goal bookkeeping.Block is EvalDelta with field [SharedAccts](codec:"sa,allocbound=config.MaxEvalDeltaAccounts") VS SDK types.EvalDelta is missing SharedAccts field`,
+ },
+ {
+ name: "goal-v-sdk-blockheader",
+ xPkg: "github.com/algorand/go-algorand/data/bookkeeping",
+ xType: "BlockHeader",
+ yPkg: "github.com/algorand/go-algorand-sdk/v2/types",
+ yBranch: "develop",
+ yType: "BlockHeader",
+ },
+ {
+ name: "goal-v-sdk-stateproof",
+ xPkg: "github.com/algorand/go-algorand/crypto/stateproof",
+ xType: "StateProof",
+ yPkg: "github.com/algorand/go-algorand-sdk/v2/types",
+ yBranch: "develop",
+ yType: "StateProof",
+ },
+ {
+ name: "goal-v-spv-stateproof",
+ xPkg: "github.com/algorand/go-algorand/crypto/stateproof",
+ xType: "StateProof",
+ yPkg: "github.com/algorand/go-stateproof-verification/stateproof",
+ yType: "StateProof",
+ },
+ }
+
+ for _, tc := range testCases {
+		// These should be run serially, as they modify go.mod, go.sum and typeAnalyzer/main.go.
+		// TODO: it is probably preferable to set up and `go get` everything _before_ running the
+		// tests and tear down after the tests are done.
+ t.Run(tc.name, func(t *testing.T) {
+ if tc.skip {
+ t.Skip(tc.skipReason)
+ }
+ err := runApp(tc.xPkg, tc.xBranch, tc.xType, tc.yPkg, tc.yBranch, tc.yType, "")
+ require.NoError(t, err)
+ })
+ }
+}
diff --git a/data/transactions/verify/streamverifier.go b/util/execpool/stream.go
index a5a5fe512..af2bb0809 100644
--- a/data/transactions/verify/streamverifier.go
+++ b/util/execpool/stream.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package verify
+package execpool
import (
"context"
@@ -23,7 +23,6 @@ import (
"time"
"github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/util/execpool"
)
// ErrShuttingDownError is the error returned when a job is not processed because the service is shutting down
@@ -38,6 +37,8 @@ var ErrShuttingDownError = errors.New("not processed, execpool service is shutti
// for processing before waitForNextJobDuration.
const waitForNextJobDuration = 2 * time.Millisecond
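+
+// txnPerWorksetThreshold is the number of batchable items at which a batch is
+// submitted to the exec pool promptly rather than waiting for the next-job timer.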
+const txnPerWorksetThreshold = 32
+
// batchSizeBlockLimit is the limit when the batch exceeds, will be added to the exec pool, even if the pool is saturated
// and the stream will be blocked until the exec pool accepts the batch
const batchSizeBlockLimit = 1024
@@ -61,14 +62,14 @@ type BatchProcessor interface {
// StreamToBatch makes batches from incoming stream of jobs, and submits the batches to the exec pool
type StreamToBatch struct {
inputChan <-chan InputJob
- executionPool execpool.BacklogPool
+ executionPool BacklogPool
ctx context.Context
activeLoopWg sync.WaitGroup
batchProcessor BatchProcessor
}
// MakeStreamToBatch creates a new stream to batch converter
-func MakeStreamToBatch(inputChan <-chan InputJob, execPool execpool.BacklogPool,
+func MakeStreamToBatch(inputChan <-chan InputJob, execPool BacklogPool,
batchProcessor BatchProcessor) *StreamToBatch {
return &StreamToBatch{
diff --git a/util/execpool/stream_test.go b/util/execpool/stream_test.go
new file mode 100644
index 000000000..a6f3b17da
--- /dev/null
+++ b/util/execpool/stream_test.go
@@ -0,0 +1,442 @@
+// Copyright (C) 2019-2023 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package execpool
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+// implements BatchProcessor interface for testing purposes
+type mockBatchProcessor struct {
+ notify chan struct{} // notify the test that cleanup was called
+}
+
+func (mbp *mockBatchProcessor) ProcessBatch(jobs []InputJob) {
+ for i := range jobs {
+ job := jobs[i].(*mockJob)
+ job.processed = true
+ job.batchSize = len(jobs)
+ job.batchOrder = i
+ if job.callback != nil {
+ job.callback(job.id)
+ }
+ }
+}
+
+func (mbp *mockBatchProcessor) GetErredUnprocessed(ue InputJob, err error) {
+ job := ue.(*mockJob)
+ job.returnError = err
+}
+
+func (mbp *mockBatchProcessor) Cleanup(ue []InputJob, err error) {
+ for i := range ue {
+ mbp.GetErredUnprocessed(ue[i], err)
+ }
+ if mbp.notify != nil && len(ue) > 0 {
+ mbp.notify <- struct{}{}
+ }
+}
+
+// implements InputJob interface
+type mockJob struct {
+ id int
+ numberOfItems uint64
+ jobError error
+ returnError error
+ processed bool
+ batchSize int
+ batchOrder int
+ callback func(id int)
+}
+
+func (mj *mockJob) GetNumberOfBatchableItems() (count uint64, err error) {
+ return mj.numberOfItems, mj.jobError
+}
+
+type mockPool struct {
+ pool
+ hold chan struct{} // used to sync the EnqueueBacklog call with the test
+ err error // when not nil, EnqueueBacklog will return the err instead of executing the task
+ poolCapacity chan struct{} // mimics the pool capacity which blocks EnqueueBacklog
+ asyncDelay chan struct{} // used to control when the task gets executed after EnqueueBacklog queues and returns
+}
+
+func (mp *mockPool) EnqueueBacklog(enqueueCtx context.Context, t ExecFunc, arg interface{}, out chan interface{}) error {
+ // allow the test to know when the exec pool is executing the job
+ <-mp.hold
+ // simulate the execution of the job by the pool
+ if mp.err != nil {
+ // return the mock error
+ return mp.err
+ }
+ mp.poolCapacity <- struct{}{}
+ go func() {
+ mp.asyncDelay <- struct{}{}
+ t(arg)
+ }()
+ return nil
+}
+
+func (mp *mockPool) BufferSize() (length, capacity int) {
+ return len(mp.poolCapacity), cap(mp.poolCapacity)
+}
+
+func testStreamToBatchCore(wg *sync.WaitGroup, mockJobs <-chan *mockJob, done <-chan struct{}, t *testing.T) {
+ defer wg.Done()
+ ctx, cancel := context.WithCancel(context.Background())
+ verificationPool := MakeBacklog(nil, 0, LowPriority, t)
+ defer verificationPool.Shutdown()
+
+ inputChan := make(chan InputJob)
+ mbp := mockBatchProcessor{}
+ sv := MakeStreamToBatch(inputChan, verificationPool, &mbp)
+ sv.Start(ctx)
+
+ for j := range mockJobs {
+ inputChan <- j
+ }
+ <-done
+ cancel()
+ sv.WaitForStop()
+}
+
+// TestStreamToBatchBasic tests the basic functionality
+func TestStreamToBatchBasic(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ numJobs := 400
+	// every 99th job (including job 0) errors in GetNumberOfBatchableItems: 400/99 + 1 = 5 jobs
+	numJobsToProcess := 400 - (400/99 + 1)
+	// processedChan will notify when all the jobs are processed
+	processedChan := make(chan struct{}, numJobsToProcess-1)
+ done := make(chan struct{})
+ // callback is needed to know when the processing should stop
+ callback := func(id int) {
+ select {
+ case processedChan <- struct{}{}:
+ default:
+ // this was the last job
+ close(done)
+ }
+ }
+ numError := fmt.Errorf("err on GetNumberOfBatchableItems")
+	mockJobs := make([]*mockJob, numJobs)
+ for i := 0; i < numJobs; i++ {
+ mockJobs[i] = &mockJob{
+ id: i,
+ // get some jobs with 0 items too
+ numberOfItems: uint64(i % 5),
+ callback: callback}
+
+ if i%99 == 0 {
+ // get GetNumberOfBatchableItems to report an error
+ mockJobs[i].jobError = numError
+ }
+ if i%101 == 0 {
+ // have a batch exceeding batchSizeBlockLimit limit
+ mockJobs[i].numberOfItems = batchSizeBlockLimit + 1
+ }
+ }
+ jobChan := make(chan *mockJob)
+ wg := sync.WaitGroup{}
+ wg.Add(2)
+ go testStreamToBatchCore(&wg, jobChan, done, t)
+
+ go func() {
+ defer wg.Done()
+ for i := range mockJobs {
+ jobChan <- mockJobs[i]
+ }
+ close(jobChan)
+ <-done
+ }()
+ wg.Wait()
+ for i := 0; i < numJobs; i++ {
+ if i%99 == 0 {
+ // this should be GetNumberOfBatchableItems
+ require.ErrorIs(t, mockJobs[i].returnError, numError)
+ require.False(t, mockJobs[i].processed)
+ continue
+ }
+		if i%5 == 0 {
+			// jobs with 0 batchable items should be processed alone
+			require.Equal(t, 1, mockJobs[i].batchSize)
+		}
+ if i%101 == 0 {
+ // this should be the last in the batch
+ require.Equal(t, mockJobs[i].batchSize-1, mockJobs[i].batchOrder)
+ }
+		require.Nil(t, mockJobs[i].returnError)
+ require.True(t, mockJobs[i].processed)
+ }
+}
+
+// TestNoInputYet lets the service start and reach the timeout without any input
+func TestNoInputYet(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ numJobs := 1
+ done := make(chan struct{})
+ jobChan := make(chan *mockJob)
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go testStreamToBatchCore(&wg, jobChan, done, t)
+ callback := func(id int) {
+ if id == numJobs-1 {
+ close(done)
+ }
+ }
+ // Wait to trigger the timer once with 0 elements
+ time.Sleep(2 * waitForNextJobDuration)
+
+ // send a job, make sure it goes through
+ mockJob := &mockJob{
+ numberOfItems: uint64(0),
+ callback: callback}
+ jobChan <- mockJob
+ <-done
+ require.Nil(t, mockJob.returnError)
+ require.True(t, mockJob.processed)
+ require.Equal(t, 1, mockJob.batchSize)
+ close(jobChan)
+ wg.Wait()
+}
+
+// TestMultipleBatchAttempts tests the behavior when multiple batch attempts fail and the stream blocks
+func TestMultipleBatchAttempts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ mp := mockPool{
+ hold: make(chan struct{}),
+ err: nil,
+ poolCapacity: make(chan struct{}, 1),
+ asyncDelay: make(chan struct{}, 10),
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ inputChan := make(chan InputJob)
+ mbp := mockBatchProcessor{}
+ sv := MakeStreamToBatch(inputChan, &mp, &mbp)
+ sv.Start(ctx)
+
+ var jobCalled int
+ jobCalledRef := &jobCalled
+ callbackFeedback := make(chan struct{})
+
+ mj := mockJob{
+ numberOfItems: uint64(txnPerWorksetThreshold + 1),
+ id: 1,
+ callback: func(id int) {
+ *jobCalledRef = *jobCalledRef + id
+ <-callbackFeedback
+ },
+ }
+ // first saturate the pool
+ mp.poolCapacity <- struct{}{}
+ inputChan <- &mj
+
+ // wait for the job to be submitted to the pool
+ // since this is only a single job with 1 task, and the pool is at capacity,
+ // this will only happen when the numberOfBatchAttempts == 1
+ mp.hold <- struct{}{}
+
+ // here, the pool is saturated, and the stream should be blocked
+ select {
+ case inputChan <- &mj:
+ require.Fail(t, "the stream should be blocked here")
+ default:
+ }
+
+	// now let the pool regain capacity
+ <-mp.poolCapacity
+
+ // make sure it is processed before reading the value
+ callbackFeedback <- struct{}{}
+ require.Equal(t, 1, jobCalled)
+
+ // the stream should be unblocked now
+ inputChan <- &mj
+
+ // let the next job go through
+ mp.hold <- struct{}{}
+ // give the pool the capacity for it to process
+ <-mp.poolCapacity
+
+ // make sure it is processed before reading the value
+ callbackFeedback <- struct{}{}
+ require.Equal(t, 2, jobCalled)
+
+ cancel()
+ sv.WaitForStop()
+}
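The mock pool's `hold` and `poolCapacity` channels act as rendezvous points, so the test decides exactly when each task may proceed and when the pool counts as saturated. A reduced sketch of the same pattern (illustrative names only):

```go
package main

import "fmt"

type pool struct {
	capacity chan struct{} // buffered: acts as a semaphore
	hold     chan struct{} // unbuffered: the test gates each task
}

func (p *pool) enqueue(task func()) {
	<-p.hold                 // wait until the test releases this task
	p.capacity <- struct{}{} // blocks while the pool is saturated
	defer func() { <-p.capacity }()
	task()
}

func main() {
	p := &pool{capacity: make(chan struct{}, 1), hold: make(chan struct{})}
	done := make(chan struct{})
	go p.enqueue(func() { fmt.Println("task ran"); close(done) })
	p.hold <- struct{}{} // deterministically release the task
	<-done
}
```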
+
+// TestErrors tests all the cases where exec pool returned error is handled
+// by ending the stream processing
+func TestErrors(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ mp := mockPool{
+ hold: make(chan struct{}),
+ err: fmt.Errorf("Test error"),
+ poolCapacity: make(chan struct{}, 5),
+ asyncDelay: make(chan struct{}, 10),
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ inputChan := make(chan InputJob)
+ mbp := mockBatchProcessor{}
+ sv := MakeStreamToBatch(inputChan, &mp, &mbp)
+
+ /***************************************************/
+ // error adding to the pool when numberOfBatchable=0
+ /***************************************************/
+ sv.Start(ctx)
+ mj := mockJob{
+ numberOfItems: 0,
+ }
+ inputChan <- &mj
+ // let the enqueue pool process and return an error
+ mp.hold <- struct{}{}
+	// on error, the job's callback should not run
+	// (this follows from the mockPool EnqueueBacklog behavior)
+ require.False(t, mj.processed)
+ // the service should end
+ sv.WaitForStop()
+
+ /***************************************************/
+ // error adding to the pool when < txnPerWorksetThreshold
+ /***************************************************/
+ // Case where the timer ticks
+ sv.Start(ctx)
+ mj.numberOfItems = txnPerWorksetThreshold - 1
+ inputChan <- &mj
+ // let the enqueue pool process and return an error
+ mp.hold <- struct{}{}
+ require.False(t, mj.processed)
+ // the service should end
+ sv.WaitForStop()
+
+ /***************************************************/
+ // error adding to the pool when <= batchSizeBlockLimit
+ /***************************************************/
+ // Case where the timer ticks
+ sv.Start(ctx)
+ mj.numberOfItems = batchSizeBlockLimit
+ inputChan <- &mj
+ // let the enqueue pool process and return an error
+ mp.hold <- struct{}{}
+ require.False(t, mj.processed)
+ // the service should end
+ sv.WaitForStop()
+
+ /***************************************************/
+ // error adding to the pool when > batchSizeBlockLimit
+ /***************************************************/
+ // Case where the timer ticks
+ sv.Start(ctx)
+ mj.numberOfItems = batchSizeBlockLimit + 1
+ inputChan <- &mj
+ // let the enqueue pool process and return an error
+ mp.hold <- struct{}{}
+ require.False(t, mj.processed)
+ // the service should end
+ sv.WaitForStop()
+}
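Each sub-case above follows the same contract: one job in, one enqueue error out, then the service stops. A compact sketch of that contract (an assumed shape, not the repo's loop):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type batch []int

// run stops stream processing as soon as handing a batch to the pool fails.
func run(ctx context.Context, in <-chan batch, enqueue func(batch) error) {
	for {
		select {
		case <-ctx.Done():
			return
		case b := <-in:
			if err := enqueue(b); err != nil {
				fmt.Println("stopping:", err)
				return
			}
		}
	}
}

func main() {
	in := make(chan batch, 1)
	in <- batch{1, 2}
	run(context.Background(), in, func(batch) error { return errors.New("pool rejected batch") })
}
```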
+
+// TestPendingJobOnRestart makes sure a pending job in the exec pool is canceled
+// when the stream ctx is canceled, and a new one is started with a new ctx
+func TestPendingJobOnRestart(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ mp := mockPool{
+ hold: make(chan struct{}),
+ poolCapacity: make(chan struct{}, 2),
+ asyncDelay: make(chan struct{}),
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ inputChan := make(chan InputJob)
+ mbp := mockBatchProcessor{
+ notify: make(chan struct{}, 1),
+ }
+ sv := MakeStreamToBatch(inputChan, &mp, &mbp)
+
+	// start with a saturated pool so that the job will not go through before
+	// the ctx is canceled
+ mp.poolCapacity <- struct{}{}
+
+ sv.Start(ctx)
+ mj := mockJob{
+ numberOfItems: 1,
+ }
+ inputChan <- &mj
+ // wait for the job to be submitted to the exec pool, waiting for capacity
+ mp.hold <- struct{}{}
+
+ // now the job should be waiting in the exec pool queue waiting to be executed
+
+ // cancel the ctx
+ cancel()
+ // make sure EnqueueBacklog has returned and the stream can terminate
+ sv.WaitForStop()
+
+ // start a new session
+ ctx, cancel = context.WithCancel(context.Background())
+ sv.Start(ctx)
+
+ // submit a new job
+ callbackFeedback := make(chan struct{}, 1)
+ mjNew := mockJob{
+ numberOfItems: 1,
+ callback: func(id int) {
+ callbackFeedback <- struct{}{}
+ },
+ }
+ inputChan <- &mjNew
+ mp.hold <- struct{}{}
+ <-mp.poolCapacity
+
+ // when the exec pool tries to execute the jobs,
+ // the function in addBatchToThePoolNow should abort the old and process the new
+ <-mp.asyncDelay
+ <-mp.asyncDelay
+
+	// wait for the notification from the cleanup function before checking the old job's state
+ <-mbp.notify
+ require.Error(t, mj.returnError)
+ require.False(t, mj.processed)
+
+ <-callbackFeedback
+ require.True(t, mjNew.processed)
+
+ cancel()
+ sv.WaitForStop()
+}
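The restart choreography above depends on `Start` binding one run of the loop to a context and `WaitForStop` blocking until that run has fully exited, so a fresh `Start` with a new context is safe. A self-contained sketch of that lifecycle (not the repo's type):

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

type service struct {
	in chan int
	wg sync.WaitGroup
}

func (s *service) Start(ctx context.Context) {
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		for {
			select {
			case <-ctx.Done():
				return
			case v := <-s.in:
				fmt.Println("processed", v)
			}
		}
	}()
}

func (s *service) WaitForStop() { s.wg.Wait() }

func main() {
	s := &service{in: make(chan int)}
	ctx, cancel := context.WithCancel(context.Background())
	s.Start(ctx)
	s.in <- 1
	cancel()
	s.WaitForStop()

	ctx, cancel = context.WithCancel(context.Background())
	s.Start(ctx) // restart with a fresh context
	s.in <- 2
	cancel()
	s.WaitForStop()
}
```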
diff --git a/util/rateLimit.go b/util/rateLimit.go
index c4e85c71e..c62264c32 100644
--- a/util/rateLimit.go
+++ b/util/rateLimit.go
@@ -171,6 +171,7 @@ func (erl *ElasticRateLimiter) DisableCongestionControl() {
// - there is not sufficient free capacity to assign a reserved capacity block
// - there is no reserved or shared capacity available for the client
func (erl *ElasticRateLimiter) ConsumeCapacity(c ErlClient) (*ErlCapacityGuard, error) {
+ var cg ErlCapacityGuard
var q capacityQueue
var err error
var exists bool
@@ -182,7 +183,8 @@ func (erl *ElasticRateLimiter) ConsumeCapacity(c ErlClient) (*ErlCapacityGuard,
erl.clientLock.RUnlock()
// Step 0: Check for, and create a capacity reservation if needed
- if !exists {
+ // Don't interact with reservations if the capacity-per-reservation is zero
+ if !exists && erl.CapacityPerReservation > 0 {
q, err = erl.openReservation(c)
if err != nil {
return nil, err
@@ -195,14 +197,17 @@ func (erl *ElasticRateLimiter) ConsumeCapacity(c ErlClient) (*ErlCapacityGuard,
return &ErlCapacityGuard{cq: q, cm: erl.cm}, nil
}
- // Step 1: Attempt consumption from the reserved queue
- cg, err := q.consume(erl.cm)
- if err == nil {
- if erl.cm != nil {
- erl.cm.Consumed(c, time.Now()) // notify the congestion manager that this client consumed from this queue
+ // Step 1: Attempt consumption from the reserved queue if one exists
+ if q != nil {
+ cg, err = q.consume(erl.cm)
+ if err == nil {
+ if erl.cm != nil {
+ erl.cm.Consumed(c, time.Now()) // notify the congestion manager that this client consumed from this queue
+ }
+ return &cg, nil
}
- return &cg, nil
}
+
// Step 2: Potentially gate shared queue access if the congestion manager disallows it
if erl.cm != nil &&
enableCM &&
@@ -212,6 +217,7 @@ func (erl *ElasticRateLimiter) ConsumeCapacity(c ErlClient) (*ErlCapacityGuard,
}
return nil, errConManDropped
}
+
// Step 3: Attempt consumption from the shared queue
cg, err = erl.sharedCapacity.consume(erl.cm)
if err != nil {
diff --git a/util/rateLimit_test.go b/util/rateLimit_test.go
index 669960ecf..fd7a03140 100644
--- a/util/rateLimit_test.go
+++ b/util/rateLimit_test.go
@@ -105,6 +105,30 @@ func TestReservations(t *testing.T) {
assert.Equal(t, 0, len(erl.capacityByClient))
}
+// When there is no reservation per client, the reservation map is not used.
+// This ensures we never wait on a capacity queue that would never vend capacity.
+func TestZeroSizeReservations(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ client1 := mockClient("client1")
+ client2 := mockClient("client2")
+ erl := NewElasticRateLimiter(4, 0, time.Second, nil)
+
+ _, err := erl.ConsumeCapacity(client1)
+ time.Sleep(100 * time.Millisecond)
+ assert.Equal(t, 0, len(erl.capacityByClient))
+ assert.NoError(t, err)
+
+ _, err = erl.ConsumeCapacity(client2)
+ time.Sleep(100 * time.Millisecond)
+ assert.Equal(t, 0, len(erl.capacityByClient))
+ assert.NoError(t, err)
+
+ erl.closeReservation(client1)
+ assert.Equal(t, 0, len(erl.capacityByClient))
+ erl.closeReservation(client2)
+ assert.Equal(t, 0, len(erl.capacityByClient))
+}
+
func TestConsumeReleaseCapacity(t *testing.T) {
partitiontest.PartitionTest(t)
client := mockClient("client")
diff --git a/util/s3/s3Helper.go b/util/s3/s3Helper.go
index 8c70f14ed..9fc6f0691 100644
--- a/util/s3/s3Helper.go
+++ b/util/s3/s3Helper.go
@@ -245,15 +245,15 @@ func GetVersionFromName(name string) (version uint64, err error) {
return
}
var val uint64
- for index, match := range submatchAll[0] {
- if index > 0 {
- version <<= 16
- val, err = strconv.ParseUint(match, 10, 0)
- if err != nil {
- return
- }
- version += val
+	submatch := submatchAll[0][1:] // skip the first match, which is the whole string
+	offsets := []int{0, 16, 24}    // major is not width-restricted; minor gets 16 bits, patch gets 24
+ for index, match := range submatch {
+ version <<= offsets[index]
+ val, err = strconv.ParseUint(match, 10, 0)
+ if err != nil {
+ return
}
+ version += val
}
return
}
@@ -262,13 +262,13 @@ func GetVersionFromName(name string) (version uint64, err error) {
func GetVersionPartsFromVersion(version uint64) (major uint64, minor uint64, patch uint64, err error) {
val := version
- if val < 1<<32 {
+ if val < 1<<40 {
err = errors.New("versions below 1.0.0 not supported")
return
}
- patch = val & 0xffff
- val >>= 16
+ patch = val & 0xffffff
+ val >>= 24
minor = val & 0xffff
val >>= 16
major = val
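With these changes the packed layout is: patch in the low 24 bits, minor in the next 16, and major in everything from bit 40 up. A small sketch of the round trip (the helper names are illustrative, not the package's API):

```go
package main

import "fmt"

func encode(major, minor, patch uint64) uint64 {
	return major<<40 | minor<<24 | patch // each field must fit its width
}

func decode(v uint64) (major, minor, patch uint64) {
	return v >> 40, (v >> 24) & 0xffff, v & 0xffffff
}

func main() {
	v := encode(3, 16, 0)
	fmt.Println(v)         // 3298803318784, the 3.16.0 test vector below
	fmt.Println(decode(v)) // 3 16 0
}
```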
diff --git a/util/s3/s3Helper_test.go b/util/s3/s3Helper_test.go
index c0502a3b0..8ba636c2a 100644
--- a/util/s3/s3Helper_test.go
+++ b/util/s3/s3Helper_test.go
@@ -17,6 +17,7 @@
package s3
import (
+ "fmt"
"os"
"reflect"
"testing"
@@ -174,6 +175,7 @@ func TestMakeS3SessionForDownloadWithBucket(t *testing.T) {
func TestGetVersionFromName(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
type args struct {
name string
@@ -181,12 +183,12 @@ func TestGetVersionFromName(t *testing.T) {
expected uint64
}
tests := []args{
- {name: "test 1 (major)", version: "_1.0.0", expected: 1 * 1 << 32},
- {name: "test 2 (major)", version: "_2.0.0", expected: 2 * 1 << 32},
- {name: "test 3 (minor)", version: "_1.1.0", expected: 1*1<<32 + 1*1<<16},
- {name: "test 4 (minor)", version: "_1.2.0", expected: 1*1<<32 + 2*1<<16},
- {name: "test 5 (patch)", version: "_1.0.1", expected: 1*1<<32 + 1},
- {name: "test 6 (patch)", version: "_1.0.2", expected: 1*1<<32 + 2},
+ {name: "test 1 (major)", version: "_1.0.0", expected: 1 * 1 << 40},
+ {name: "test 2 (major)", version: "_2.0.0", expected: 2 * 1 << 40},
+ {name: "test 3 (minor)", version: "_1.1.0", expected: 1*1<<40 + 1*1<<24},
+ {name: "test 4 (minor)", version: "_1.2.0", expected: 1*1<<40 + 2*1<<24},
+ {name: "test 5 (patch)", version: "_1.0.1", expected: 1*1<<40 + 1},
+ {name: "test 6 (patch)", version: "_1.0.2", expected: 1*1<<40 + 2},
}
for _, test := range tests {
@@ -196,8 +198,24 @@ func TestGetVersionFromName(t *testing.T) {
}
}
+func TestGetVersionFromNameCompare(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ name1 := "config_3.13.170018.tar.gz"
+ name2 := "config_3.15.157.tar.gz"
+
+ ver1, err := GetVersionFromName(name1)
+ require.NoError(t, err)
+ ver2, err := GetVersionFromName(name2)
+ require.NoError(t, err)
+
+ require.Less(t, ver1, ver2)
+}
+
func TestGetPartsFromVersion(t *testing.T) {
partitiontest.PartitionTest(t)
+ t.Parallel()
type args struct {
name string
@@ -207,12 +225,13 @@ func TestGetPartsFromVersion(t *testing.T) {
expPatch uint64
}
tests := []args{
- {name: "test 1 (major)", version: 1 * 1 << 32, expMajor: 1, expMinor: 0, expPatch: 0},
- {name: "test 2 (major)", version: 2 * 1 << 32, expMajor: 2, expMinor: 0, expPatch: 0},
- {name: "test 3 (minor)", version: 1*1<<32 + 1*1<<16, expMajor: 1, expMinor: 1, expPatch: 0},
- {name: "test 4 (minor)", version: 1*1<<32 + 2*1<<16, expMajor: 1, expMinor: 2, expPatch: 0},
- {name: "test 5 (patch)", version: 1*1<<32 + 1, expMajor: 1, expMinor: 0, expPatch: 1},
- {name: "test 6 (patch)", version: 1*1<<32 + 2, expMajor: 1, expMinor: 0, expPatch: 2},
+ {name: "test 1 (major)", version: 1 * 1 << 40, expMajor: 1, expMinor: 0, expPatch: 0},
+ {name: "test 2 (major)", version: 2 * 1 << 40, expMajor: 2, expMinor: 0, expPatch: 0},
+ {name: "test 3 (minor)", version: 1*1<<40 + 1*1<<24, expMajor: 1, expMinor: 1, expPatch: 0},
+ {name: "test 4 (minor)", version: 1*1<<40 + 2*1<<24, expMajor: 1, expMinor: 2, expPatch: 0},
+ {name: "test 5 (patch)", version: 1*1<<40 + 1, expMajor: 1, expMinor: 0, expPatch: 1},
+ {name: "test 6 (patch)", version: 1*1<<40 + 2, expMajor: 1, expMinor: 0, expPatch: 2},
+		{name: "test 7 (3.16.0)", version: 3298803318784, expMajor: 3, expMinor: 16, expPatch: 0},
}
for _, test := range tests {
@@ -223,6 +242,35 @@ func TestGetPartsFromVersion(t *testing.T) {
require.Equal(t, test.expPatch, actualPatch, test.name)
}
- _, _, _, err := GetVersionPartsFromVersion(1<<32 - 1)
+ _, _, _, err := GetVersionPartsFromVersion(1<<40 - 1)
require.Error(t, err, "Versions less than 1.0.0 should not be parsed.")
}
+
+func TestGetPartsFromVersionEndToEnd(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ type args struct {
+ major uint64
+ minor uint64
+ patch uint64
+ }
+ tests := []args{
+ {major: 1, minor: 0, patch: 0},
+ {major: 3, minor: 13, patch: 170018},
+ {major: 3, minor: 15, patch: 157},
+ }
+
+ for _, test := range tests {
+ name := fmt.Sprintf("config_%d.%d.%d.tar.gz", test.major, test.minor, test.patch)
+ t.Run(name, func(t *testing.T) {
+ ver, err := GetVersionFromName(name)
+ require.NoError(t, err)
+ actualMajor, actualMinor, actualPatch, err := GetVersionPartsFromVersion(ver)
+ require.NoError(t, err)
+ require.Equal(t, test.major, actualMajor)
+ require.Equal(t, test.minor, actualMinor)
+ require.Equal(t, test.patch, actualPatch)
+ })
+ }
+}
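The widened patch field is what makes `TestGetVersionFromNameCompare` pass: under the old 16-bit patch layout, patch 170018 overflows into the minor bits (170018 = 2*65536 + 38946), so 3.13.170018 aliased to 3.15.38946 and sorted above 3.15.157. A quick check of both layouts:

```go
package main

import "fmt"

func main() {
	// old layout: 16-bit minor, 16-bit patch
	old1 := uint64(3)<<32 + 13<<16 + 170018 // 3.13.170018, patch overflows
	old2 := uint64(3)<<32 + 15<<16 + 157    // 3.15.157
	fmt.Println(old1 < old2) // false: the old encoding mis-ordered them

	// new layout: 16-bit minor, 24-bit patch
	new1 := uint64(3)<<40 + 13<<24 + 170018
	new2 := uint64(3)<<40 + 15<<24 + 157
	fmt.Println(new1 < new2) // true with the widened patch field
}
```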